| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81 to 54k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
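The rows reproduced below are raw samples from this table. As a minimal sketch (assuming the data is published as a Hugging Face dataset; the repository id below is a placeholder, not the real identifier), the columns can be loaded and inspected with the `datasets` library:

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset name.
ds = load_dataset("org-name/code-style-pairs", split="train")

row = ds[0]
print(row["label"])                # int64, 0 or 1
print(row["code"][:200])           # code string (81 to ~54k characters)
print(row["style_context"][:200])  # context string (91 to ~41.9k characters)
```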
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : int ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : Optional[int],__A : int,__A : Optional[int]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : str = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : int = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Tuple ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
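For context, here is a sketch of how the pipeline exercised by this test is invoked directly. The checkpoint name, dtype, and tensor sizes are assumptions for illustration (real PIL images would normally replace the random tensors); the keyword arguments mirror the dummy inputs above.

```python
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor

# Assumed stage-II checkpoint name; substitute the checkpoint you actually use.
pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", torch_dtype=torch.float16
).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)
image = floats_tensor((1, 3, 64, 64)).to("cuda")            # low-resolution stage-I output
original_image = floats_tensor((1, 3, 256, 256)).to("cuda")  # full-resolution source image
mask_image = floats_tensor((1, 3, 256, 256)).to("cuda")      # inpainting mask

result = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=image,
    original_image=original_image,
    mask_image=mask_image,
    generator=generator,
    num_inference_steps=50,
    output_type="numpy",
)
```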
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Optional[Any],__A : bool = True,__A : Dict[str, int] = None,__A : int = 3_2,__A : bool = True,__A : Union[int, float] = 1 / 2_5_5,__A : bool = True,__A : bool = True,__A : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],__A : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],__A : bool = True,__A : Optional[Any]=7,__A : Union[str, Any]=3_0,__A : Any=4_0_0,__A : Optional[Any]=3,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Union[str, Any] = do_resize
_lowerCamelCase : Tuple = size if size is not None else {"shortest_edge": 2_8_8}
_lowerCamelCase : int = size_divisor
_lowerCamelCase : List[str] = do_rescale
_lowerCamelCase : Tuple = rescale_factor
_lowerCamelCase : int = do_normalize
_lowerCamelCase : str = do_center_crop
_lowerCamelCase : Optional[Any] = image_mean
_lowerCamelCase : Any = image_std
_lowerCamelCase : Tuple = do_pad
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[Any] = min_resolution
_lowerCamelCase : List[Any] = max_resolution
def lowerCamelCase_ ( self : List[Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[Any]=False ):
if not batched:
_lowerCamelCase : Any = self.size["shortest_edge"]
_lowerCamelCase : List[str] = image_inputs[0]
if isinstance(__A,Image.Image ):
_lowerCamelCase , _lowerCamelCase : List[str] = image.size
else:
_lowerCamelCase , _lowerCamelCase : int = image.shape[1], image.shape[2]
_lowerCamelCase : List[str] = size / min(__A,__A )
if h < w:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = size, scale * w
else:
_lowerCamelCase , _lowerCamelCase : Optional[int] = scale * h, size
_lowerCamelCase : List[Any] = int((1_3_3_3 / 8_0_0) * size )
if max(__A,__A ) > max_size:
_lowerCamelCase : int = max_size / max(__A,__A )
_lowerCamelCase : int = newh * scale
_lowerCamelCase : Optional[int] = neww * scale
_lowerCamelCase , _lowerCamelCase : List[str] = int(newh + 0.5 ), int(neww + 0.5 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_lowerCamelCase : Union[str, Any] = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase : Any = max(__A,key=lambda __A : item[0] )[0]
_lowerCamelCase : Dict = max(__A,key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = BridgeTowerImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A,"image_mean" ) )
self.assertTrue(hasattr(__A,"image_std" ) )
self.assertTrue(hasattr(__A,"do_normalize" ) )
self.assertTrue(hasattr(__A,"do_resize" ) )
self.assertTrue(hasattr(__A,"size" ) )
self.assertTrue(hasattr(__A,"size_divisor" ) )
def lowerCamelCase_ ( self : int ):
pass
def lowerCamelCase_ ( self : Optional[Any] ):
# Initialize image processor
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A,Image.Image )
# Test not batched input
_lowerCamelCase : List[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
_lowerCamelCase : Tuple = image_processing(__A,return_tensors="pt" ).pixel_values
_lowerCamelCase , _lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(__A,batched=__A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def lowerCamelCase_ ( self : Dict ):
# Initialize image processor
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A,np.ndarray )
# Test not batched input
_lowerCamelCase : List[str] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
_lowerCamelCase , _lowerCamelCase : str = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
_lowerCamelCase : Optional[int] = image_processing(__A,return_tensors="pt" ).pixel_values
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(__A,batched=__A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def lowerCamelCase_ ( self : Optional[Any] ):
# Initialize image processor
_lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A,torch.Tensor )
# Test not batched input
_lowerCamelCase : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
_lowerCamelCase : Dict = image_processing(__A,return_tensors="pt" ).pixel_values
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__A,batched=__A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""NLLB-MoE lazy import structure."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Bitonic sort: a comparison-based sorting network (array length must be a power of two)."""
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for the given
    direction (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of the given length starting at `low`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at `low` by building a bitonic sequence
    (first half ascending, second half descending) and then merging it."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 |
"""Find the smallest number whose proportion of perfect partitions drops below a given threshold."""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A partition is perfect when the derived exponent is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
from __future__ import annotations
UpperCAmelCase_ : Optional[Any] = tuple[int, int, int]
UpperCAmelCase_ : str = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCAmelCase_ : List[str] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCAmelCase_ : Union[str, Any] = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
UpperCAmelCase_ : Dict = 'FOBHMDKEXQNRAULPGSJVTYICZW'
UpperCAmelCase_ : Optional[int] = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
UpperCAmelCase_ : Any = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
UpperCAmelCase_ : List[str] = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
UpperCAmelCase_ : Any = 'SGLCPQWZHKXAREONTFBVIYJUDM'
UpperCAmelCase_ : int = 'HVSICLTYKQUBXDWAJZOMFGPREN'
UpperCAmelCase_ : Tuple = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
UpperCAmelCase_ : str = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
UpperCAmelCase_ : str = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def A_ ( _lowerCAmelCase : RotorPositionT , _lowerCAmelCase : RotorSelectionT , _lowerCAmelCase : str ):
"""simple docstring"""
if (unique_rotsel := len(set(_lowerCAmelCase ) )) < 3:
_lowerCamelCase : List[str] = F'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(_lowerCAmelCase )
# Checks if rotor positions are valid
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = rotpos
if not 0 < rotorposa <= len(_lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = F'First rotor position is not within range of 1..26 ({rotorposa}'
raise ValueError(_lowerCAmelCase )
if not 0 < rotorposa <= len(_lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = F'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(_lowerCAmelCase )
if not 0 < rotorposa <= len(_lowerCAmelCase ):
_lowerCamelCase : Optional[int] = F'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(_lowerCAmelCase )
# Validates string and returns dict
_lowerCamelCase : str = _plugboard(_lowerCAmelCase )
return rotpos, rotsel, pbdict
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : List[str] = F'Plugboard setting isn\'t type string ({type(_lowerCAmelCase )})'
raise TypeError(_lowerCAmelCase )
elif len(_lowerCAmelCase ) % 2 != 0:
_lowerCamelCase : Optional[int] = F'Odd number of symbols ({len(_lowerCAmelCase )})'
raise Exception(_lowerCAmelCase )
elif pbstring == "":
return {}
pbstring.replace(" " , "" )
# Checks if all characters are unique
_lowerCamelCase : Union[str, Any] = set()
for i in pbstring:
if i not in abc:
_lowerCamelCase : List[Any] = F'\'{i}\' not in list of symbols'
raise Exception(_lowerCAmelCase )
elif i in tmppbl:
_lowerCamelCase : Optional[int] = F'Duplicate symbol ({i})'
raise Exception(_lowerCAmelCase )
else:
tmppbl.add(_lowerCAmelCase )
del tmppbl
# Created the dictionary
_lowerCamelCase : List[Any] = {}
for j in range(0 , len(_lowerCAmelCase ) - 1 , 2 ):
_lowerCamelCase : List[Any] = pbstring[j + 1]
_lowerCamelCase : Union[str, Any] = pbstring[j]
return pb
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : RotorPositionT , _lowerCAmelCase : RotorSelectionT = (rotora, rotora, rotora) , _lowerCAmelCase : str = "" , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = text.upper()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = _validator(
_lowerCAmelCase , _lowerCAmelCase , plugb.upper() )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = rotor_position
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_lowerCamelCase : List[Any] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowerCamelCase : int = plugboard[symbol]
# rotor ra --------------------------
_lowerCamelCase : int = abc.index(_lowerCAmelCase ) + rotorposa
_lowerCamelCase : Dict = rotora[index % len(_lowerCAmelCase )]
# rotor rb --------------------------
_lowerCamelCase : int = abc.index(_lowerCAmelCase ) + rotorposa
_lowerCamelCase : Union[str, Any] = rotora[index % len(_lowerCAmelCase )]
# rotor rc --------------------------
_lowerCamelCase : List[str] = abc.index(_lowerCAmelCase ) + rotorposa
_lowerCamelCase : int = rotora[index % len(_lowerCAmelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowerCamelCase : List[str] = reflector[symbol]
# 2nd rotors
_lowerCamelCase : List[str] = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
_lowerCamelCase : Any = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
_lowerCamelCase : List[Any] = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowerCamelCase : List[str] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
_lowerCamelCase : List[str] = 0
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
_lowerCamelCase : str = 0
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
_lowerCamelCase : str = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : int = 'This is my Python script that emulates the Enigma machine from WWII.'
UpperCAmelCase_ : Union[str, Any] = (1, 1, 1)
UpperCAmelCase_ : int = 'pictures'
UpperCAmelCase_ : Tuple = (rotora, rotora, rotora)
UpperCAmelCase_ : Optional[Any] = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
"""Deprecated SageMaker-specific Trainer kept for backwards compatibility."""
import warnings

from ..trainer import Trainer
from ..utils import logging

logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
"""XNOR logic gate: outputs 1 when both inputs are equal, otherwise 0."""


def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are equal (0,0 or 1,1), else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase_ : Dict = sys.version_info >= (3, 10)
def A_ ( _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Tuple=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_lowerCAmelCase )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = None
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'titi'
lowerCAmelCase_ = 'toto'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'titi'
lowerCAmelCase_ = 'toto'
lowerCAmelCase_ = 42
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = "toto"
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[Any] = BasicEnum(self.foo )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = "toto"
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Tuple = MixedTypeEnum(self.foo )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default=A , metadata={'help': 'help message'} )
lowerCAmelCase_ = None
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[] )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[1, 2, 3] )
lowerCAmelCase_ = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
lowerCAmelCase_ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field()
lowerCAmelCase_ = field()
lowerCAmelCase_ = field()
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Union[str, Any] = BasicEnum(self.required_enum )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = field()
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default='toto' , metadata={'help': 'help message'} )
lowerCAmelCase_ = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = None
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default=A , metadata={'help': 'help message'} )
lowerCAmelCase_ = None
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[] )
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : int,__A : argparse.ArgumentParser,__A : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ),len(b._actions ) )
for x, y in zip(a._actions,b._actions ):
_lowerCamelCase : List[str] = {k: v for k, v in vars(__A ).items() if k != "container"}
_lowerCamelCase : List[Any] = {k: v for k, v in vars(__A ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices",__A ) and yy.get("choices",__A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__A ),yy["type"](__A ) )
del xx["type"], yy["type"]
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,required=__A )
expected.add_argument("--bar",type=__A,required=__A )
expected.add_argument("--baz",type=__A,required=__A )
expected.add_argument("--flag",type=__A,default=__A,const=__A,nargs="?" )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Any = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((_lowerCamelCase) , ) : Tuple = parser.parse_args_into_dataclasses(__A,look_for_args_file=__A )
self.assertFalse(example.flag )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument("--foo",default=4_2,type=__A )
expected.add_argument("--baz",default="toto",type=__A,help="help message" )
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,default=__A,const=__A,nargs="?" )
expected.add_argument("--baz",type=__A,default=__A,const=__A,nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz",action="store_false",default=__A,dest="baz" )
expected.add_argument("--opt",type=__A,default=__A )
_lowerCamelCase : Tuple = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
_lowerCamelCase : str = HfArgumentParser(__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Dict = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Optional[Any] = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Optional[int] = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : List[str] = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = HfArgumentParser(__A )
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"--foo",default="toto",choices=["titi", "toto", 4_2],type=make_choice_type_function(["titi", "toto", 4_2] ),)
self.argparsersEqual(__A,__A )
_lowerCamelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(args.foo,"toto" )
_lowerCamelCase : str = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.toto )
_lowerCamelCase : int = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo,"titi" )
_lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.titi )
_lowerCamelCase : Optional[int] = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo,4_2 )
_lowerCamelCase : Any = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.fourtytwo )
def lowerCamelCase_ ( self : Union[str, Any] ):
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = "toto"
_lowerCamelCase : int = HfArgumentParser(__A )
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument(
"--foo",default="toto",choices=("titi", "toto", 4_2),type=make_choice_type_function(["titi", "toto", 4_2] ),)
self.argparsersEqual(__A,__A )
_lowerCamelCase : Any = parser.parse_args([] )
self.assertEqual(args.foo,"toto" )
_lowerCamelCase : Optional[Any] = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo,"titi" )
_lowerCamelCase : int = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo,4_2 )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Any = argparse.ArgumentParser()
expected.add_argument("--foo_int",nargs="+",default=[],type=__A )
expected.add_argument("--bar_int",nargs="+",default=[1, 2, 3],type=__A )
expected.add_argument("--foo_str",nargs="+",default=["Hallo", "Bonjour", "Hello"],type=__A )
expected.add_argument("--foo_float",nargs="+",default=[0.1, 0.2, 0.3],type=__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = parser.parse_args([] )
self.assertEqual(
__A,Namespace(foo_int=[],bar_int=[1, 2, 3],foo_str=["Hallo", "Bonjour", "Hello"],foo_float=[0.1, 0.2, 0.3] ),)
_lowerCamelCase : Any = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__A,Namespace(foo_int=[1],bar_int=[2, 3],foo_str=["a", "b", "c"],foo_float=[0.1, 0.7] ) )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("--foo",default=__A,type=__A )
expected.add_argument("--bar",default=__A,type=__A,help="help message" )
expected.add_argument("--baz",default=__A,type=__A )
expected.add_argument("--ces",nargs="+",default=[],type=__A )
expected.add_argument("--des",nargs="+",default=[],type=__A )
_lowerCamelCase : Optional[int] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
_lowerCamelCase : Any = HfArgumentParser(__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = parser.parse_args([] )
self.assertEqual(__A,Namespace(foo=__A,bar=__A,baz=__A,ces=[],des=[] ) )
_lowerCamelCase : Any = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__A,Namespace(foo=1_2,bar=3.14,baz="42",ces=["a", "b", "c"],des=[1, 2, 3] ) )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[Any] = HfArgumentParser(__A )
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument("--required_list",nargs="+",type=__A,required=__A )
expected.add_argument("--required_str",type=__A,required=__A )
expected.add_argument(
"--required_enum",type=make_choice_type_function(["titi", "toto"] ),choices=["titi", "toto"],required=__A,)
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : int = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,required=__A )
expected.add_argument(
"--required_enum",type=make_choice_type_function(["titi", "toto"] ),choices=["titi", "toto"],required=__A,)
expected.add_argument("--opt",type=__A,default=__A )
expected.add_argument("--baz",default="toto",type=__A,help="help message" )
expected.add_argument("--foo_str",nargs="+",default=["Hallo", "Bonjour", "Hello"],type=__A )
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = HfArgumentParser(__A )
_lowerCamelCase : Optional[Any] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
_lowerCamelCase : Tuple = parser.parse_dict(__A )[0]
_lowerCamelCase : str = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = HfArgumentParser(__A )
_lowerCamelCase : str = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 4_2,
}
self.assertRaises(__A,parser.parse_dict,__A,allow_extra_keys=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : List[str] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Dict = os.path.join(__A,"temp_json" )
os.mkdir(__A )
with open(temp_local_path + ".json","w+" ) as f:
json.dump(__A,__A )
_lowerCamelCase : Tuple = parser.parse_json_file(Path(temp_local_path + ".json" ) )[0]
_lowerCamelCase : List[Any] = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Union[str, Any] = HfArgumentParser(__A )
_lowerCamelCase : List[str] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Tuple = os.path.join(__A,"temp_yaml" )
os.mkdir(__A )
with open(temp_local_path + ".yaml","w+" ) as f:
yaml.dump(__A,__A )
_lowerCamelCase : List[str] = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
_lowerCamelCase : Optional[int] = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = HfArgumentParser(__A )
self.assertIsNotNone(__A ) | 11 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
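# Bisection method: assumes equation(a) and equation(b) have opposite signs, then repeatedly halves [a, b] until the bracket is narrower than 0.01.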
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCAmelCase__ ( nn.Module ):
def __init__( self : Tuple,__A : int = 1_6,__A : int = 8_8,__A : Optional[int] = None,__A : int = 1,__A : float = 0.0,__A : int = 3_2,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[int] = None,__A : str = "geglu",__A : Optional[int] = None,):
super().__init__()
_lowerCamelCase : List[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__A,attention_head_dim=__A,in_channels=__A,num_layers=__A,dropout=__A,norm_num_groups=__A,cross_attention_dim=__A,attention_bias=__A,sample_size=__A,num_vector_embeds=__A,activation_fn=__A,num_embeds_ada_norm=__A,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_lowerCamelCase : Dict = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_lowerCamelCase : Dict = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_lowerCamelCase : int = [1, 0]
def lowerCamelCase_ ( self : List[Any],__A : Dict,__A : List[str],__A : Dict=None,__A : Dict=None,__A : Tuple=None,__A : bool = True,):
_lowerCamelCase : Optional[Any] = hidden_states
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_lowerCamelCase : Optional[int] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_lowerCamelCase : List[str] = self.transformer_index_for_condition[i]
_lowerCamelCase : Optional[int] = self.transformers[transformer_index](
__A,encoder_hidden_states=__A,timestep=__A,cross_attention_kwargs=__A,return_dict=__A,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
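# Blend the two transformers' residual outputs according to mix_ratio, then add the original input states back on.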
_lowerCamelCase : List[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__A ) | 11 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
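# Fingerprint for exact deduplication: hash the content with all whitespace stripped so that formatting-only variants collide.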
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
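# Row-level filter: keep only the first occurrence of each hash and drop autogenerated files, files with overly long lines, low alphanumeric fraction, low char/token ratio, probable config/test files, keyword-free files, and files with almost no assignments.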
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase_ : Any = logging.get_logger(__name__)
@add_end_docstrings(
A , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class UpperCAmelCase__ ( A ):
def lowerCamelCase_ ( self : Dict,__A : GenericTensor ):
if self.framework == "tf":
_lowerCamelCase : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_lowerCamelCase : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id,as_tuple=__A )
else:
raise ValueError("Unsupported framework" )
return masked_index
def lowerCamelCase_ ( self : Dict,__A : GenericTensor ):
_lowerCamelCase : List[Any] = self.get_masked_index(__A )
_lowerCamelCase : List[Any] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask",self.model.base_model_prefix,f'No mask_token ({self.tokenizer.mask_token}) found on the input',)
def lowerCamelCase_ ( self : Optional[Any],__A : GenericTensor ):
if isinstance(__A,__A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__A )
def lowerCamelCase_ ( self : List[str],__A : Tuple,__A : List[Any]=None,**__A : str ):
if return_tensors is None:
_lowerCamelCase : Optional[int] = self.framework
_lowerCamelCase : List[Any] = self.tokenizer(__A,return_tensors=__A )
self.ensure_exactly_one_mask_token(__A )
return model_inputs
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any] ):
_lowerCamelCase : Any = self.model(**__A )
_lowerCamelCase : Optional[Any] = model_inputs["input_ids"]
return model_outputs
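# Postprocessing below locates the mask position, softmaxes the logits there, keeps the top_k candidates (optionally restricted to target ids), and rebuilds a readable sequence for each one.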
def lowerCamelCase_ ( self : List[str],__A : int,__A : Optional[Any]=5,__A : int=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_lowerCamelCase : str = target_ids.shape[0]
_lowerCamelCase : int = model_outputs["input_ids"][0]
_lowerCamelCase : int = model_outputs["logits"]
if self.framework == "tf":
_lowerCamelCase : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_lowerCamelCase : List[Any] = outputs.numpy()
_lowerCamelCase : List[str] = outputs[0, masked_index, :]
_lowerCamelCase : Dict = stable_softmax(__A,axis=-1 )
if target_ids is not None:
_lowerCamelCase : Dict = tf.gather_nd(tf.squeeze(__A,0 ),target_ids.reshape(-1,1 ) )
_lowerCamelCase : int = tf.expand_dims(__A,0 )
_lowerCamelCase : Tuple = tf.math.top_k(__A,k=__A )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = topk.values.numpy(), topk.indices.numpy()
else:
_lowerCamelCase : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id,as_tuple=__A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_lowerCamelCase : List[str] = outputs[0, masked_index, :]
_lowerCamelCase : Optional[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_lowerCamelCase : List[Any] = probs[..., target_ids]
_lowerCamelCase , _lowerCamelCase : Optional[int] = probs.topk(__A )
_lowerCamelCase : int = []
_lowerCamelCase : Tuple = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist(),predictions.tolist() ) ):
_lowerCamelCase : List[Any] = []
for v, p in zip(_values,_predictions ):
# Copy is important since we're going to modify this array in place
_lowerCamelCase : Optional[Any] = input_ids.numpy().copy()
if target_ids is not None:
_lowerCamelCase : List[str] = target_ids[p].tolist()
_lowerCamelCase : Optional[int] = p
# Filter padding out:
_lowerCamelCase : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_lowerCamelCase : str = self.tokenizer.decode(__A,skip_special_tokens=__A )
_lowerCamelCase : Any = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__A )
result.append(__A )
if single_mask:
return result[0]
return result
def lowerCamelCase_ ( self : Optional[Any],__A : Tuple,__A : Optional[Any]=None ):
if isinstance(__A,__A ):
_lowerCamelCase : Tuple = [targets]
try:
_lowerCamelCase : Dict = self.tokenizer.get_vocab()
except Exception:
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : List[str] = []
for target in targets:
_lowerCamelCase : Dict = vocab.get(__A,__A )
if id_ is None:
_lowerCamelCase : int = self.tokenizer(
__A,add_special_tokens=__A,return_attention_mask=__A,return_token_type_ids=__A,max_length=1,truncation=__A,)["input_ids"]
if len(__A ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"We cannot replace it with anything meaningful, ignoring it" )
continue
_lowerCamelCase : List[str] = input_ids[0]
# XXX: If users hit this code path, tokenization becomes pretty slow.
# The warning lets them fix their input to get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
_lowerCamelCase : Any = list(set(__A ) )
if len(__A ) == 0:
raise ValueError("At least one target must be provided when passed." )
_lowerCamelCase : List[Any] = np.array(__A )
return target_ids
def lowerCamelCase_ ( self : str,__A : Any=None,__A : int=None ):
_lowerCamelCase : List[str] = {}
if targets is not None:
_lowerCamelCase : Optional[int] = self.get_target_ids(__A,__A )
_lowerCamelCase : Union[str, Any] = target_ids
if top_k is not None:
_lowerCamelCase : Optional[int] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask",self.model.base_model_prefix,"The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : int,__A : List[str],*__A : Dict,**__A : List[str] ):
_lowerCamelCase : List[str] = super().__call__(__A,**__A )
if isinstance(__A,__A ) and len(__A ) == 1:
return outputs[0]
return outputs | 11 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
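# Model-specific inputs: distilbert/roberta-style models take no token_type_ids, XLNet/XLM additionally need cls_index and p_mask (plus lang ids when language-sensitive), and SQuAD v2 adds is_impossible.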
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : Optional[int] = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
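# Token type ids mark segments: 0 for the first sequence (including [CLS]/[SEP]), 1 for the optional second sequence.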
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Union[str, Any],__A : Union[str, Any]=1_3,__A : Union[str, Any]=7,__A : Tuple=False,__A : int=True,__A : int=False,__A : List[str]=False,__A : int=1_9,__A : Optional[Any]=3_2,__A : int=5,__A : str=4,__A : Optional[int]=3_7,__A : Dict="gelu",__A : Union[str, Any]=0.1,__A : List[str]=0.1,__A : Optional[Any]=5_1_2,__A : Optional[Any]=1_6,__A : Union[str, Any]=2,__A : Union[str, Any]=0.02,__A : str=3,__A : Optional[Any]=4,__A : str=None,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : str = seq_length
_lowerCamelCase : int = is_training
_lowerCamelCase : Tuple = use_input_mask
_lowerCamelCase : List[Any] = use_token_type_ids
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Optional[int] = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : Dict = scope
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = EsmConfig(
vocab_size=3_3,hidden_size=self.hidden_size,pad_token_id=1,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,is_folding_model=__A,esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},)
return config
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : int,__A : List[Any],__A : Tuple,__A : int,__A : Union[str, Any] ):
_lowerCamelCase : int = EsmForProteinFolding(config=__A ).float()
model.to(__A )
model.eval()
_lowerCamelCase : int = model(__A,attention_mask=__A )
_lowerCamelCase : Optional[int] = model(__A )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.positions.shape,(8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape,(8, self.batch_size, self.seq_length, 7, 2) )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) : Dict = config_and_inputs
_lowerCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
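# The tester above builds a deliberately tiny folding config (2 trunk blocks, fp16 ESM disabled) so these common tests stay lightweight.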
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = False
lowerCAmelCase_ = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCAmelCase_ = ()
lowerCAmelCase_ = {} if is_torch_available() else {}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Any = EsmFoldModelTester(self )
_lowerCamelCase : str = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
@unittest.skip("Does not support attention outputs" )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : int ):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip("ESMFold only has one output format." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold does not support input chunking." )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : List[str] ):
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@require_torch
class UpperCAmelCase__ ( A ):
@slow
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Any = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
_lowerCamelCase : Dict = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
_lowerCamelCase : Tuple = model(__A )["positions"]
_lowerCamelCase : Optional[int] = torch.tensor([2.5828, 0.7993, -10.9334],dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0],__A,atol=1e-4 ) ) | 11 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
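# Convenience encode: accepts a single string or a list of strings, applies the same whitespace/Unicode preprocessing, and returns a torch tensor when return_tensors is True or "pt".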
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 1 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase_ : List[Any] = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
UpperCAmelCase_ : Union[str, Any] = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
UpperCAmelCase_ : List[Any] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase_ : Optional[int] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase_ : Tuple = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
for tf_name, hf_name in patterns:
_lowerCamelCase : int = k.replace(_lowerCAmelCase , _lowerCAmelCase )
return k
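# Conversion walks the TF weights in two passes (decoder keys first, then the rest), renames each key via the pattern tables above, transposes dense/query/key/value kernels, and loads the result into the freshly initialised PyTorch model with shape checks.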
def A_ ( _lowerCAmelCase : dict , _lowerCAmelCase : dict ):
"""simple docstring"""
_lowerCamelCase : Dict = BigBirdPegasusConfig(**_lowerCAmelCase )
_lowerCamelCase : Tuple = BigBirdPegasusForConditionalGeneration(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = torch_model.state_dict()
_lowerCamelCase : Optional[int] = {}
# separating decoder weights
_lowerCamelCase : Tuple = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
_lowerCamelCase : int = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
_lowerCamelCase : Any = [k.endswith(_lowerCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(_lowerCAmelCase ):
continue
_lowerCamelCase : Any = DECODER_PATTERNS
_lowerCamelCase : List[str] = rename_state_dict_key(_lowerCAmelCase , _lowerCAmelCase )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
_lowerCamelCase : Union[str, Any] = v.T
_lowerCamelCase : List[Any] = torch.from_numpy(_lowerCAmelCase )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
_lowerCamelCase : Optional[int] = [k.endswith(_lowerCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(_lowerCAmelCase ):
continue
_lowerCamelCase : str = REMAINING_PATTERNS
_lowerCamelCase : str = rename_state_dict_key(_lowerCAmelCase , _lowerCAmelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
_lowerCamelCase : int = v.T
_lowerCamelCase : Optional[Any] = torch.from_numpy(_lowerCAmelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
_lowerCamelCase : Dict = mapping["model.embed_positions.weight"]
_lowerCamelCase : Union[str, Any] = mapping.pop("model.embed_positions.weight" )
_lowerCamelCase , _lowerCamelCase : List[str] = torch_model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = tf.train.list_variables(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : List[Any] = ["global_step"]
for name, shape in tqdm(_lowerCAmelCase , desc="converting tf checkpoint to dict" ):
_lowerCamelCase : List[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_lowerCamelCase : Any = tf.train.load_variable(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : List[Any] = array
return tf_weights
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : dict ):
"""simple docstring"""
_lowerCamelCase : str = get_tf_weights_as_numpy(_lowerCAmelCase )
_lowerCamelCase : Tuple = convert_bigbird_pegasus(_lowerCAmelCase , _lowerCAmelCase )
torch_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : str = parser.parse_args()
UpperCAmelCase_ : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 11 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 1 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase_ : int = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : str = 256
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
if len(_lowerCAmelCase ) < MIN_NUM_TOKENS:
return None
_lowerCamelCase : str = MinHash(num_perm=_lowerCAmelCase )
for token in set(_lowerCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(_lowerCAmelCase ) if len(t.strip() ) > 0}
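# MinHash-LSH index for near-duplicate clustering: each new document is queried against the index and attached to the cluster of the first previously seen near-duplicate before being inserted itself.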
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a file to the index; if it is close to an already indexed file, record it in that file's cluster."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Multiprocessing helper: return ((index, repo_name, path), min_hash) for one dataset row."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (file_key, min_hash) pairs for a dataset, computing MinHashes in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Build clusters of near-duplicate files with a MinHash LSH index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity between the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative per group of mutually similar files in a cluster and count its copies."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every duplicate cluster, sharing the dataset with worker processes."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Remove near-duplicate files from `dataset`, keeping only the "extreme" representatives of each cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')
return ds_filter, duplicate_clusters | 11 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCAmelCase_ : List[Any] = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
UpperCAmelCase_ : Dict = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
UpperCAmelCase_ : Tuple = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
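# Accuracy over LaTeX answers: each prediction/reference pair is canonicalized by
# hendrycks/math's math_equivalence.is_equiv before being compared.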
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def lowerCamelCase_ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ),homepage="https://github.com/hendrycks/math",codebase_urls=["https://github.com/hendrycks/math"],)
def lowerCamelCase_ ( self : str,__A : List[Any],__A : List[str] ):
_lowerCamelCase : int = 0.0
for i, j in zip(__A,__A ):
n_correct += 1.0 if math_equivalence.is_equiv(__A,__A ) else 0.0
_lowerCamelCase : Tuple = n_correct / len(__A )
return {
"accuracy": accuracy,
} | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
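# Helper that builds a small random AlbertConfig and dummy input tensors for the Flax model tests below.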
class FlaxAlbertModelTester ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase_ : Optional[Any] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
UpperCAmelCase_ : List[Any] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
UpperCAmelCase_ : int = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
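# Thin wrapper around sklearn.metrics.mean_squared_error; pass squared=False to get RMSE instead.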
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def lowerCamelCase_ ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(self._get_feature_types() ),reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
],)
def lowerCamelCase_ ( self : Dict ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def lowerCamelCase_ ( self : Optional[Any],__A : Tuple,__A : Any,__A : Optional[int]=None,__A : List[str]="uniform_average",__A : List[Any]=True ):
_lowerCamelCase : int = mean_squared_error(
__A,__A,sample_weight=__A,multioutput=__A,squared=__A )
return {"mse": mse} | 11 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
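# Map a Flax parameter key/tensor to the PyTorch naming and layout: expert and linear kernels
# are renamed to "weight" and transposed, scale/embedding parameters are only renamed.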
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
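# Stream a T5X/Switch checkpoint out of its tensorstore files and re-save it as PyTorch shards
# no larger than max_shard_size, writing a weight-map index when more than one shard is produced.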
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : Optional[Any] = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = GPTaTokenizer
def __init__( self : Optional[int],__A : str=None,__A : Tuple=None,__A : Union[str, Any]=None,__A : str="<|endoftext|>",__A : List[Any]="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Dict=False,**__A : Union[str, Any],):
super().__init__(
__A,__A,tokenizer_file=__A,unk_token=__A,bos_token=__A,eos_token=__A,add_prefix_space=__A,**__A,)
_lowerCamelCase : Any = kwargs.pop("add_bos_token",__A )
_lowerCamelCase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : int = add_prefix_space
_lowerCamelCase : Union[str, Any] = pre_tok_class(**__A )
_lowerCamelCase : Tuple = add_prefix_space
def lowerCamelCase_ ( self : Tuple,*__A : Tuple,**__A : List[Any] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : List[Any],*__A : Optional[int],**__A : Optional[Any] ):
_lowerCamelCase : Optional[Any] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
_lowerCamelCase : int = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
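    # Flatten a multi-turn Conversation into one id sequence, adding EOS after every turn and
    # keeping only the most recent model_max_length tokens.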
def lowerCamelCase_ ( self : Optional[Any],__A : "Conversation" ):
_lowerCamelCase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A,add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
return input_ids | 11 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the smallest cuboid size for which the number of cuboids with an
    integer-length shortest surface path first exceeds `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
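# Configuration for the DETA object detector; the options largely mirror Deformable DETR,
# plus flags for the two-stage variant, box refinement and first-stage assignment.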
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'deta'
lowerCAmelCase_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[Any],__A : Union[str, Any]=None,__A : int=9_0_0,__A : Optional[int]=2_0_4_8,__A : Any=6,__A : str=2_0_4_8,__A : List[Any]=8,__A : Tuple=6,__A : Optional[Any]=1_0_2_4,__A : Optional[int]=8,__A : Union[str, Any]=0.0,__A : Optional[Any]=True,__A : Any="relu",__A : Dict=2_5_6,__A : int=0.1,__A : Dict=0.0,__A : Optional[int]=0.0,__A : Dict=0.02,__A : List[Any]=1.0,__A : Tuple=True,__A : int=False,__A : Optional[int]="sine",__A : Optional[int]=5,__A : List[Any]=4,__A : int=4,__A : Optional[Any]=True,__A : Optional[Any]=3_0_0,__A : Any=True,__A : Tuple=True,__A : Optional[Any]=1,__A : List[Any]=5,__A : List[str]=2,__A : Tuple=1,__A : int=1,__A : Tuple=5,__A : Optional[int]=2,__A : Optional[Any]=0.1,__A : Dict=0.25,**__A : List[str],):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_lowerCamelCase : Dict = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__A,__A ):
_lowerCamelCase : Dict = backbone_config.pop("model_type" )
_lowerCamelCase : List[Any] = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Optional[Any] = config_class.from_dict(__A )
_lowerCamelCase : List[str] = backbone_config
_lowerCamelCase : Optional[int] = num_queries
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Any = d_model
_lowerCamelCase : Any = encoder_ffn_dim
_lowerCamelCase : Optional[int] = encoder_layers
_lowerCamelCase : Tuple = encoder_attention_heads
_lowerCamelCase : List[Any] = decoder_ffn_dim
_lowerCamelCase : List[Any] = decoder_layers
_lowerCamelCase : List[str] = decoder_attention_heads
_lowerCamelCase : Tuple = dropout
_lowerCamelCase : Optional[int] = attention_dropout
_lowerCamelCase : Tuple = activation_dropout
_lowerCamelCase : Optional[int] = activation_function
_lowerCamelCase : int = init_std
_lowerCamelCase : Optional[Any] = init_xavier_std
_lowerCamelCase : Tuple = encoder_layerdrop
_lowerCamelCase : Tuple = auxiliary_loss
_lowerCamelCase : Optional[Any] = position_embedding_type
# deformable attributes
_lowerCamelCase : List[str] = num_feature_levels
_lowerCamelCase : Optional[Any] = encoder_n_points
_lowerCamelCase : str = decoder_n_points
_lowerCamelCase : Any = two_stage
_lowerCamelCase : List[Any] = two_stage_num_proposals
_lowerCamelCase : int = with_box_refine
_lowerCamelCase : Dict = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_lowerCamelCase : Optional[Any] = class_cost
_lowerCamelCase : List[Any] = bbox_cost
_lowerCamelCase : List[str] = giou_cost
# Loss coefficients
_lowerCamelCase : str = mask_loss_coefficient
_lowerCamelCase : Union[str, Any] = dice_loss_coefficient
_lowerCamelCase : Optional[int] = bbox_loss_coefficient
_lowerCamelCase : List[Any] = giou_loss_coefficient
_lowerCamelCase : Dict = eos_coefficient
_lowerCamelCase : int = focal_alpha
super().__init__(is_encoder_decoder=__A,**__A )
@property
def lowerCamelCase_ ( self : str ):
return self.encoder_attention_heads
@property
def lowerCamelCase_ ( self : str ):
return self.d_model
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : str = copy.deepcopy(self.__dict__ )
_lowerCamelCase : List[Any] = self.backbone_config.to_dict()
_lowerCamelCase : List[str] = self.__class__.model_type
return output | 11 |
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary representation as a string, e.g. 5 -> "0b101"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'levit'
def __init__( self : Optional[Any],__A : List[Any]=2_2_4,__A : Tuple=3,__A : List[Any]=3,__A : Tuple=2,__A : Optional[int]=1,__A : Optional[Any]=1_6,__A : Any=[1_2_8, 2_5_6, 3_8_4],__A : List[Any]=[4, 8, 1_2],__A : Union[str, Any]=[4, 4, 4],__A : Optional[int]=[1_6, 1_6, 1_6],__A : int=0,__A : Dict=[2, 2, 2],__A : Any=[2, 2, 2],__A : str=0.02,**__A : Optional[Any],):
super().__init__(**__A )
_lowerCamelCase : int = image_size
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = kernel_size
_lowerCamelCase : int = stride
_lowerCamelCase : List[str] = padding
_lowerCamelCase : Union[str, Any] = hidden_sizes
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : Any = depths
_lowerCamelCase : List[Any] = key_dim
_lowerCamelCase : Optional[Any] = drop_path_rate
_lowerCamelCase : str = patch_size
_lowerCamelCase : Union[str, Any] = attention_ratio
_lowerCamelCase : Any = mlp_ratio
_lowerCamelCase : Optional[Any] = initializer_range
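        # Shrinking ("Subsample") attention blocks placed between the three LeViT stages.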
_lowerCamelCase : Union[str, Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : str ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCamelCase_ ( self : Dict ):
return 1e-4 | 11 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
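    # Encode the conditioning image with the CLIP vision tower, repeat the embedding once per
    # requested sample, and prepend an all-zero embedding when classifier-free guidance is active.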
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
print()
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so the smallest value ends up at the head
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> Any:
    """Shuffle a list in place by repeatedly swapping two randomly chosen positions; returns the list."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 1 |
'''simple docstring'''
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors (a perfect number)."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 1 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, optionally rounded to `digit_amount` digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3)) | 11 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
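    # Wrap a sequence (or a pair of sequences) with BOS/EOS special tokens when building model inputs.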
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = None
lowerCAmelCase_ = 1
lowerCAmelCase_ = None
lowerCAmelCase_ = False
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCamelCase_ ( self : Tuple ):
return self.__class__(**{k: copy.deepcopy(__A ) for k, v in self.__dict__.items()} ) | 11 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
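# Maintains an exponential moving average (shadow copy) of a model's parameters; the shadow
# parameters are updated in place after every optimizer step and can be copied back into a model.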
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
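    # Decay schedule: with EMA warmup, decay = 1 - (1 + step / inv_gamma) ** -power; otherwise
    # (1 + step) / (10 + step). The result is clamped to the [min_decay, decay] range.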
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'instructblip_vision_model'
def __init__( self : List[Any],__A : Dict=1_4_0_8,__A : Dict=6_1_4_4,__A : Any=3_9,__A : Optional[int]=1_6,__A : Any=2_2_4,__A : List[str]=1_4,__A : Any="gelu",__A : int=1e-6,__A : str=0.0,__A : Dict=1e-10,__A : List[Any]=True,**__A : List[Any],):
super().__init__(**__A )
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Any = attention_dropout
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Optional[int] = qkv_bias
@classmethod
def lowerCamelCase_ ( cls : Dict,__A : Union[str, os.PathLike],**__A : List[Any] ):
cls._set_token_in_kwargs(__A )
_lowerCamelCase , _lowerCamelCase : Tuple = cls.get_config_dict(__A,**__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_lowerCamelCase : Dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls,"model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A,**__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'instructblip_qformer'
def __init__( self : int,__A : Union[str, Any]=3_0_5_2_2,__A : Any=7_6_8,__A : Union[str, Any]=1_2,__A : List[str]=1_2,__A : Optional[Any]=3_0_7_2,__A : Dict="gelu",__A : Tuple=0.1,__A : str=0.1,__A : Union[str, Any]=5_1_2,__A : Optional[int]=0.02,__A : List[Any]=1e-12,__A : str=0,__A : Union[str, Any]="absolute",__A : str=2,__A : List[Any]=1_4_0_8,**__A : Optional[Any],):
super().__init__(pad_token_id=__A,**__A )
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Optional[Any] = layer_norm_eps
_lowerCamelCase : Tuple = position_embedding_type
_lowerCamelCase : Optional[int] = cross_attention_frequency
_lowerCamelCase : Dict = encoder_hidden_size
@classmethod
def lowerCamelCase_ ( cls : Dict,__A : Union[str, os.PathLike],**__A : Tuple ):
cls._set_token_in_kwargs(__A )
_lowerCamelCase , _lowerCamelCase : Optional[int] = cls.get_config_dict(__A,**__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_lowerCamelCase : Union[str, Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls,"model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A,**__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'instructblip'
lowerCAmelCase_ = True
def __init__( self : Any,__A : Tuple=None,__A : Dict=None,__A : Union[str, Any]=None,__A : Dict=3_2,**__A : Tuple ):
super().__init__(**__A )
if vision_config is None:
_lowerCamelCase : Any = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
_lowerCamelCase : Optional[int] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
_lowerCamelCase : int = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
_lowerCamelCase : Dict = InstructBlipVisionConfig(**__A )
_lowerCamelCase : List[Any] = InstructBlipQFormerConfig(**__A )
_lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
_lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**__A )
_lowerCamelCase : Any = self.text_config.tie_word_embeddings
_lowerCamelCase : Optional[int] = self.text_config.is_encoder_decoder
_lowerCamelCase : Optional[int] = num_query_tokens
_lowerCamelCase : Optional[int] = self.vision_config.hidden_size
_lowerCamelCase : str = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCamelCase : Optional[Any] = 1.0
_lowerCamelCase : int = 0.02
@classmethod
def lowerCamelCase_ ( cls : Optional[int],__A : InstructBlipVisionConfig,__A : InstructBlipQFormerConfig,__A : PretrainedConfig,**__A : Tuple,):
return cls(
vision_config=vision_config.to_dict(),qformer_config=qformer_config.to_dict(),text_config=text_config.to_dict(),**__A,)
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCamelCase : str = self.vision_config.to_dict()
_lowerCamelCase : List[str] = self.qformer_config.to_dict()
_lowerCamelCase : Tuple = self.text_config.to_dict()
_lowerCamelCase : List[str] = self.__class__.model_type
return output | 11 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
            # We split QKV into separate Q, K and V projections
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has its QKV weight separated in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
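            # Shape check (illustrative numbers, assuming an OPT-125m style checkpoint with
            # hidden size 768): the fused qkv_proj weight is (2304, 768), so depth = 2304 and
            # torch.split(..., depth // 3, dim=0) yields three (768, 768) projection matrices.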
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 1 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['image_processor', 'tokenizer']
lowerCAmelCase_ = 'OwlViTImageProcessor'
lowerCAmelCase_ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any],__A : str=None,__A : Optional[int]=None,**__A : Dict ):
_lowerCamelCase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",__A,)
_lowerCamelCase : Union[str, Any] = kwargs.pop("feature_extractor" )
_lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A,__A )
def __call__( self : Any,__A : Tuple=None,__A : Optional[Any]=None,__A : Optional[int]=None,__A : Union[str, Any]="max_length",__A : Dict="np",**__A : int ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__A,__A ) or (isinstance(__A,__A ) and not isinstance(text[0],__A )):
_lowerCamelCase : Tuple = [self.tokenizer(__A,padding=__A,return_tensors=__A,**__A )]
elif isinstance(__A,__A ) and isinstance(text[0],__A ):
_lowerCamelCase : Dict = []
# Maximum number of queries across batch
_lowerCamelCase : Optional[int] = max([len(__A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__A ) != max_num_queries:
_lowerCamelCase : Optional[Any] = t + [" "] * (max_num_queries - len(__A ))
_lowerCamelCase : Union[str, Any] = self.tokenizer(__A,padding=__A,return_tensors=__A,**__A )
encodings.append(__A )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_lowerCamelCase : int = np.concatenate([encoding["input_ids"] for encoding in encodings],axis=0 )
_lowerCamelCase : List[str] = np.concatenate([encoding["attention_mask"] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_lowerCamelCase : str = jnp.concatenate([encoding["input_ids"] for encoding in encodings],axis=0 )
_lowerCamelCase : List[str] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_lowerCamelCase : Tuple = torch.cat([encoding["input_ids"] for encoding in encodings],dim=0 )
_lowerCamelCase : List[str] = torch.cat([encoding["attention_mask"] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_lowerCamelCase : str = tf.stack([encoding["input_ids"] for encoding in encodings],axis=0 )
_lowerCamelCase : str = tf.stack([encoding["attention_mask"] for encoding in encodings],axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_lowerCamelCase : Tuple = BatchEncoding()
_lowerCamelCase : List[Any] = input_ids
_lowerCamelCase : Dict = attention_mask
if query_images is not None:
_lowerCamelCase : Tuple = BatchEncoding()
_lowerCamelCase : Optional[Any] = self.image_processor(
__A,return_tensors=__A,**__A ).pixel_values
_lowerCamelCase : Tuple = query_pixel_values
if images is not None:
_lowerCamelCase : Tuple = self.image_processor(__A,return_tensors=__A,**__A )
if text is not None and images is not None:
_lowerCamelCase : Dict = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_lowerCamelCase : str = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ),tensor_type=__A )
def lowerCamelCase_ ( self : List[str],*__A : int,**__A : Tuple ):
return self.image_processor.post_process(*__A,**__A )
def lowerCamelCase_ ( self : Union[str, Any],*__A : List[str],**__A : List[str] ):
return self.image_processor.post_process_object_detection(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : int,**__A : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*__A,**__A )
def lowerCamelCase_ ( self : Tuple,*__A : List[Any],**__A : Union[str, Any] ):
return self.tokenizer.batch_decode(*__A,**__A )
def lowerCamelCase_ ( self : Tuple,*__A : Tuple,**__A : Dict ):
return self.tokenizer.decode(*__A,**__A )
@property
def lowerCamelCase_ ( self : Dict ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",__A,)
return self.image_processor_class
@property
def lowerCamelCase_ ( self : Any ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",__A,)
return self.image_processor | 11 |
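# Sketch of typical usage (checkpoint name and image path are illustrative assumptions):
#   from transformers import OwlViTProcessor
#   from PIL import Image
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.open("cats_and_dogs.png")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
# The nested text list exercises the per-sample query padding implemented in __call__ above.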
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
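# Note: bitonic sort only guarantees a fully ordered result when the array length is a
# power of two; for example, bitonic_sort(arr, 0, len(arr), 1) on arr = [3, 1, 4, 2]
# yields [1, 2, 3, 4], while arbitrary-length inputs may come out only partially sorted.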
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 1 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase_ : Any = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase_ : Optional[int] = [0, 25, 50]
UpperCAmelCase_ : Dict = [25, 50, 75]
UpperCAmelCase_ : Union[str, Any] = fuzz.membership.trimf(X, abca)
UpperCAmelCase_ : Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCAmelCase_ : int = np.ones(75)
UpperCAmelCase_ : Tuple = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase_ : str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase_ : Optional[Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
UpperCAmelCase_ : List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCAmelCase_ : Union[str, Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCAmelCase_ : Dict = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase_ : List[str] = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
UpperCAmelCase_ : List[str] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
UpperCAmelCase_ : List[Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
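    # The two labels above are left without code in this script; a minimal NumPy sketch of
    # both compositions (an illustrative assumption, not part of the original example) could be:
    #   R = np.minimum.outer(young, middle_aged)  # fuzzy relation built from the two sets
    #   S = np.minimum.outer(middle_aged, young)
    #   T_maxmin = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)  # T[i, j] = max_k min(R[i, k], S[k, j])
    #   T_maxprod = np.max(R[:, :, None] * S[None, :, :], axis=1)            # max-product variant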
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show() | 11 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
    _lowerCamelCase : Optional[int] = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
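# Worked example: for positive_integer = 2, math.sqrt(4 * 2 + 1) / 2 + 1 / 2 == 2.0 and
# log2(2.0) == 1.0 is an integer, so 2 counts as a "perfect" partition value; for
# positive_integer = 6 the same expression gives log2(3.0) ≈ 1.585, so 6 does not.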
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : List[Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase_ : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Tuple,**__A : Optional[int] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCamelCase : Tuple = deprecated_arg[3:]
_lowerCamelCase : Tuple = not kwargs.pop(__A )
logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
_lowerCamelCase : Dict = kwargs.pop("tpu_name",self.tpu_name )
_lowerCamelCase : List[Any] = kwargs.pop("device_idx",self.device_idx )
_lowerCamelCase : Optional[int] = kwargs.pop("eager_mode",self.eager_mode )
_lowerCamelCase : Optional[Any] = kwargs.pop("use_xla",self.use_xla )
super().__init__(**__A )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Name of TPU'} , )
lowerCAmelCase_ = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Benchmark models in eager model.'} )
lowerCAmelCase_ = field(
default=A , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def lowerCamelCase_ ( self : List[str] ):
requires_backends(self,["tf"] )
_lowerCamelCase : Any = None
if self.tpu:
try:
if self.tpu_name:
_lowerCamelCase : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCamelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCamelCase : Tuple = None
return tpu
@cached_property
def lowerCamelCase_ ( self : Any ):
requires_backends(self,["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCamelCase : int = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx],"GPU" )
_lowerCamelCase : Tuple = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([],"GPU" ) # disable GPU
_lowerCamelCase : Optional[int] = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' )
return strategy
@property
def lowerCamelCase_ ( self : str ):
requires_backends(self,["tf"] )
return self._setup_tpu is not None
@property
def lowerCamelCase_ ( self : Dict ):
requires_backends(self,["tf"] )
return self._setup_strategy
@property
def lowerCamelCase_ ( self : Dict ):
requires_backends(self,["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def lowerCamelCase_ ( self : Dict ):
requires_backends(self,["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return self.n_gpu > 0 | 11 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCAmelCase__ :
def __init__( self : Dict,__A : Any,__A : str=1_3,__A : Optional[int]=7,__A : Tuple=True,__A : Optional[Any]=True,__A : Any=False,__A : List[Any]=True,__A : int=9_9,__A : List[str]=3_2,__A : Tuple=5,__A : Optional[Any]=4,__A : str=3_7,__A : Optional[Any]="gelu",__A : List[str]=0.1,__A : List[Any]=0.1,__A : str=5_1_2,__A : Optional[int]=1_6,__A : Dict=2,__A : Union[str, Any]=0.02,__A : Any=3,__A : Dict=4,__A : Any=None,):
_lowerCamelCase : Dict = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : List[Any] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : List[str] = use_input_mask
_lowerCamelCase : Dict = use_token_type_ids
_lowerCamelCase : int = use_labels
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : str = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : str = num_choices
_lowerCamelCase : Union[str, Any] = scope
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : List[Any] = None
if self.use_input_mask:
_lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Union[str, Any] = None
if self.use_token_type_ids:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : str = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Optional[int] = None
if self.use_labels:
_lowerCamelCase : Dict = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : Any = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : int ):
return OpenLlamaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,use_stable_embedding=__A,)
def lowerCamelCase_ ( self : str,__A : Tuple,__A : str,__A : Union[str, Any],__A : List[Any],__A : List[str],__A : Optional[int],__A : str ):
_lowerCamelCase : Union[str, Any] = OpenLlamaModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Any = model(__A,attention_mask=__A )
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : List[Any],__A : Dict,__A : int,__A : Union[str, Any],__A : Any,__A : List[Any],__A : str,__A : Union[str, Any],__A : Tuple,__A : List[str],):
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : str = OpenLlamaModel(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(
__A,attention_mask=__A,encoder_hidden_states=__A,encoder_attention_mask=__A,)
_lowerCamelCase : str = model(
__A,attention_mask=__A,encoder_hidden_states=__A,)
_lowerCamelCase : Dict = model(__A,attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[Any],__A : str,__A : Tuple,__A : List[str],__A : int,__A : Dict,__A : List[str],__A : Optional[int],__A : int,):
_lowerCamelCase : Any = OpenLlamaForCausalLM(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Tuple = model(__A,attention_mask=__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any,__A : List[str],__A : int,__A : int,__A : int,__A : Dict,__A : Optional[Any],__A : Optional[Any],__A : Union[str, Any],__A : List[str],):
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = OpenLlamaForCausalLM(config=__A )
model.to(__A )
model.eval()
# first forward pass
_lowerCamelCase : Tuple = model(
__A,attention_mask=__A,encoder_hidden_states=__A,encoder_attention_mask=__A,use_cache=__A,)
_lowerCamelCase : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_lowerCamelCase : Dict = ids_tensor((self.batch_size, 3),config.vocab_size )
_lowerCamelCase : int = ids_tensor((self.batch_size, 3),vocab_size=2 )
        # append to next input_ids and attention mask
_lowerCamelCase : str = torch.cat([input_ids, next_tokens],dim=-1 )
_lowerCamelCase : Any = torch.cat([input_mask, next_mask],dim=-1 )
_lowerCamelCase : Any = model(
__A,attention_mask=__A,encoder_hidden_states=__A,encoder_attention_mask=__A,output_hidden_states=__A,)["hidden_states"][0]
_lowerCamelCase : Optional[int] = model(
__A,attention_mask=__A,encoder_hidden_states=__A,encoder_attention_mask=__A,past_key_values=__A,output_hidden_states=__A,)["hidden_states"][0]
# select random slice
_lowerCamelCase : Dict = ids_tensor((1,),output_from_past.shape[-1] ).item()
_lowerCamelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A,__A,atol=1e-3 ) )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Any = config_and_inputs
_lowerCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowerCAmelCase_ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowerCAmelCase_ = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[str] = OpenLlamaModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : str = type
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Dict = input_dict["input_ids"]
_lowerCamelCase : List[str] = input_ids.ne(1 ).to(__A )
_lowerCamelCase : Optional[int] = ids_tensor([self.model_tester.batch_size],self.model_tester.type_sequence_label_size )
_lowerCamelCase : Union[str, Any] = OpenLlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A,attention_mask=__A,labels=__A )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Any = "single_label_classification"
_lowerCamelCase : Tuple = input_dict["input_ids"]
_lowerCamelCase : Dict = input_ids.ne(1 ).to(__A )
_lowerCamelCase : Optional[int] = ids_tensor([self.model_tester.batch_size],self.model_tester.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = OpenLlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Any = model(__A,attention_mask=__A,labels=__A )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = 3
_lowerCamelCase : List[Any] = "multi_label_classification"
_lowerCamelCase : Optional[Any] = input_dict["input_ids"]
_lowerCamelCase : Tuple = input_ids.ne(1 ).to(__A )
_lowerCamelCase : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels],self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCamelCase : Optional[int] = OpenLlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = model(__A,attention_mask=__A,labels=__A )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def lowerCamelCase_ ( self : List[Any] ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def lowerCamelCase_ ( self : Any,__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = ids_tensor([1, 1_0],config.vocab_size )
_lowerCamelCase : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase : Any = OpenLlamaModel(__A )
original_model.to(__A )
original_model.eval()
_lowerCamelCase : Optional[Any] = original_model(__A ).last_hidden_state
_lowerCamelCase : Dict = original_model(__A ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase : Tuple = {"type": scaling_type, "factor": 10.0}
_lowerCamelCase : Optional[Any] = OpenLlamaModel(__A )
scaled_model.to(__A )
scaled_model.eval()
_lowerCamelCase : List[str] = scaled_model(__A ).last_hidden_state
_lowerCamelCase : Optional[int] = scaled_model(__A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__A,__A,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__A,__A,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__A,__A,atol=1e-5 ) ) | 11 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
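# For reference: equation(x) = 10 - x * x has roots at ±sqrt(10) ≈ ±3.162, so both calls above converge to ~3.16 within the 0.01 bracket width.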
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = DistilBertTokenizer
lowerCAmelCase_ = DistilBertTokenizerFast
lowerCAmelCase_ = True
@slow
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
_lowerCamelCase : str = tokenizer.encode("sequence builders",add_special_tokens=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode("multi-sequence build",add_special_tokens=__A )
_lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__A )
_lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__A,__A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
] | 11 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 1 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : str="attention" ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = params[F'{prefix}/layers_{i}/{layer_name}/key/kernel']
_lowerCamelCase : Any = params[F'{prefix}/layers_{i}/{layer_name}/out/kernel']
_lowerCamelCase : Any = params[F'{prefix}/layers_{i}/{layer_name}/query/kernel']
_lowerCamelCase : Optional[int] = params[F'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Dict=False ):
"""simple docstring"""
if split_mlp_wi:
_lowerCamelCase : Optional[Any] = params[F'{prefix}/layers_{i}/mlp/wi_0/kernel']
_lowerCamelCase : Dict = params[F'{prefix}/layers_{i}/mlp/wi_1/kernel']
_lowerCamelCase : Union[str, Any] = (wi_a, wi_a)
else:
_lowerCamelCase : Tuple = params[F'{prefix}/layers_{i}/mlp/wi/kernel']
_lowerCamelCase : Tuple = params[F'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
"""simple docstring"""
return params[F'{prefix}/layers_{i}/{layer_name}/scale']
def A_ ( _lowerCAmelCase : dict , *, _lowerCAmelCase : int , _lowerCAmelCase : bool ):
"""simple docstring"""
_lowerCamelCase : List[str] = traverse_util.flatten_dict(variables["target"] )
_lowerCamelCase : Dict = {"/".join(_lowerCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_lowerCamelCase : Tuple = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , _lowerCAmelCase )
_lowerCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_lowerCamelCase : Optional[int] = old["token_embedder/embedding"]
# Encoder.
for i in range(_lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
_lowerCamelCase : int = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "encoder" , "pre_attention_layer_norm" )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = tax_attention_lookup(_lowerCAmelCase , _lowerCAmelCase , "encoder" , "attention" )
_lowerCamelCase : List[Any] = layer_norm
_lowerCamelCase : List[str] = k.T
_lowerCamelCase : Tuple = o.T
_lowerCamelCase : str = q.T
_lowerCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_lowerCamelCase : str = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "encoder" , "pre_mlp_layer_norm" )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = tax_mlp_lookup(_lowerCAmelCase , _lowerCAmelCase , "encoder" , _lowerCAmelCase )
_lowerCamelCase : int = layer_norm
if split_mlp_wi:
_lowerCamelCase : Dict = wi[0].T
_lowerCamelCase : List[Any] = wi[1].T
else:
_lowerCamelCase : Optional[Any] = wi.T
_lowerCamelCase : Optional[int] = wo.T
_lowerCamelCase : Optional[Any] = old[
"encoder/relpos_bias/rel_embedding"
].T
_lowerCamelCase : Optional[Any] = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(_lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
_lowerCamelCase : Optional[int] = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "pre_self_attention_layer_norm" )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = tax_attention_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "self_attention" )
_lowerCamelCase : Optional[Any] = layer_norm
_lowerCamelCase : Any = k.T
_lowerCamelCase : Any = o.T
_lowerCamelCase : Optional[int] = q.T
_lowerCamelCase : List[str] = v.T
# Block i, layer 1 (Cross Attention).
_lowerCamelCase : Any = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "pre_cross_attention_layer_norm" )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = tax_attention_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "encoder_decoder_attention" )
_lowerCamelCase : int = layer_norm
_lowerCamelCase : Union[str, Any] = k.T
_lowerCamelCase : List[str] = o.T
_lowerCamelCase : Any = q.T
_lowerCamelCase : str = v.T
# Block i, layer 2 (MLP).
_lowerCamelCase : Optional[int] = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "pre_mlp_layer_norm" )
_lowerCamelCase , _lowerCamelCase : Optional[int] = tax_mlp_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , _lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_lowerCamelCase : Optional[int] = wi[0].T
_lowerCamelCase : List[Any] = wi[1].T
else:
_lowerCamelCase : Dict = wi.T
_lowerCamelCase : Dict = wo.T
_lowerCamelCase : Union[str, Any] = old["decoder/decoder_norm/scale"]
_lowerCamelCase : Union[str, Any] = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_lowerCamelCase : Tuple = old["decoder/logits_dense/kernel"].T
return new
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : bool ):
"""simple docstring"""
_lowerCamelCase : Any = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_lowerCamelCase : Any = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_lowerCamelCase : Tuple = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
_lowerCamelCase : Optional[Any] = state_dict["shared.weight"]
return state_dict
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = checkpoints.load_tax_checkpoint(_lowerCAmelCase )
_lowerCamelCase : Tuple = convert_tax_to_pytorch(_lowerCAmelCase , num_layers=config.num_layers , is_encoder_only=_lowerCAmelCase )
_lowerCamelCase : List[Any] = make_state_dict(_lowerCAmelCase , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : List[Any] = TaConfig.from_json_file(_lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_lowerCamelCase : Optional[Any] = TaEncoderModel(_lowerCAmelCase )
else:
_lowerCamelCase : Any = TaForConditionalGeneration(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_lowerCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_lowerCAmelCase )
print("Done" )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
) | 11 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
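# Examples are converted to features once and cached to disk behind a file lock, so repeated
# runs and other distributed ranks reuse the same tensors instead of re-tokenizing SQuAD.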
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
for i in range(0 , _lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
for _ in range(0 , i + 1 ): # printing stars
print("* " , end="" )
print()
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
for i in range(_lowerCAmelCase , 0 , -1 ):
for _ in range(_lowerCAmelCase , 0 , -1 ): # printing stars
print("* " , end="" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(" " , end="" )
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if n <= 0:
print(" ... .... nothing printing :(" )
return
floyd(_lowerCAmelCase ) # upper half
reverse_floyd(_lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
UpperCAmelCase_ : Any = 1
while K:
UpperCAmelCase_ : Optional[Any] = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
UpperCAmelCase_ : str = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...') | 11 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
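# Writing out the sentencepiece vocab is only possible when the fast tokenizer still has the
# original model file; otherwise the method raises before touching the target directory.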
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def A_ ( _lowerCAmelCase : SplitDict ):
"""simple docstring"""
_lowerCamelCase : List[Any] = split_dict._to_yaml_list()
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = SplitDict._from_yaml_list(_lowerCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_lowerCamelCase : Any = None
# the split name of split_dict takes over the name of the split info object
_lowerCamelCase : Optional[int] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=_lowerCAmelCase ), SplitInfo(dataset_name="my_dataset" )] )
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 11 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
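# Fast-path encode: preprocess the text (or each text in a list), feed it straight to
# SentencePiece, and optionally wrap the resulting ids in a torch tensor.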
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase_ : int = TypeVar('T')
UpperCAmelCase_ : Any = TypeVar('U')
class UpperCAmelCase__ ( Generic[T, U] ):
def __init__( self : Any,__A : T | None,__A : U | None ):
_lowerCamelCase : Optional[Any] = key
_lowerCamelCase : int = val
_lowerCamelCase : DoubleLinkedListNode[T, U] | None = None
_lowerCamelCase : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : int ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class UpperCAmelCase__ ( Generic[T, U] ):
def __init__( self : Any ):
_lowerCamelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__A,__A )
_lowerCamelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__A,__A )
_lowerCamelCase , _lowerCamelCase : List[str] = self.rear, self.head
def __repr__( self : Tuple ):
_lowerCamelCase : str = ["DoubleLinkedList"]
_lowerCamelCase : Union[str, Any] = self.head
while node.next is not None:
rep.append(str(__A ) )
_lowerCamelCase : Optional[Any] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__A )
def lowerCamelCase_ ( self : Any,__A : DoubleLinkedListNode[T, U] ):
_lowerCamelCase : List[Any] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_lowerCamelCase : Dict = node
_lowerCamelCase : Optional[Any] = previous
_lowerCamelCase : int = node
_lowerCamelCase : int = self.rear
def lowerCamelCase_ ( self : Union[str, Any],__A : DoubleLinkedListNode[T, U] ):
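# Unlink the node and return it; nodes missing either neighbour (the head/rear sentinels,
# or anything already removed) are left untouched and None is returned.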
if node.prev is None or node.next is None:
return None
_lowerCamelCase : List[Any] = node.next
_lowerCamelCase : Any = node.prev
_lowerCamelCase : Any = None
_lowerCamelCase : Optional[Any] = None
return node
class UpperCAmelCase__ ( Generic[T, U] ):
lowerCAmelCase_ = {}
def __init__( self : str,__A : int ):
_lowerCamelCase : DoubleLinkedList[T, U] = DoubleLinkedList()
_lowerCamelCase : Dict = capacity
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Any ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : Union[str, Any],__A : T ):
return key in self.cache
def lowerCamelCase_ ( self : List[Any],__A : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_lowerCamelCase : DoubleLinkedListNode[T, U] = self.cache[key]
_lowerCamelCase : int = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__A )
return node.val
self.miss += 1
return None
def lowerCamelCase_ ( self : int,__A : T,__A : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_lowerCamelCase : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__A ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
_lowerCamelCase : Optional[Any] = DoubleLinkedListNode(__A,__A )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_lowerCamelCase : Union[str, Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_lowerCamelCase : List[Any] = value
self.list.add(__A )
@classmethod
def lowerCamelCase_ ( cls : Tuple,__A : int = 1_2_8 ):
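# Decorator factory: each decorated function gets its own LRUCache of the given capacity,
# keyed on the function's first positional argument — a minimal cousin of functools.lru_cache.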
def cache_decorator_inner(__A : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*__A : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_lowerCamelCase : Tuple = LRUCache(__A )
_lowerCamelCase : str = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_lowerCamelCase : Optional[int] = func(*__A )
cls.decorator_function_to_instance_map[func].put(args[0],__A )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__A,"cache_info",__A ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 1 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : str = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = GPTSwaTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : int = GPTSwaTokenizer(__A,eos_token="<unk>",bos_token="<unk>",pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any],__A : Optional[int] ):
_lowerCamelCase : Tuple = "This is a test"
_lowerCamelCase : Any = "This is a test"
return input_text, output_text
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = "<s>"
_lowerCamelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ),__A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ),__A )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"<unk>" )
self.assertEqual(vocab_keys[1],"<s>" )
self.assertEqual(vocab_keys[-1],"j" )
self.assertEqual(len(__A ),2_0_0_0 )
def lowerCamelCase_ ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size,2_0_0_0 )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = GPTSwaTokenizer(__A )
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__A,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
_lowerCamelCase : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
__A,["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],)
# fmt: on
_lowerCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A,[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],)
_lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__A )
# fmt: off
self.assertListEqual(
__A,["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[int] = GPTSwaTokenizer(__A )
_lowerCamelCase : str = ["This is a test", "I was born in 92000, and this is falsé."]
_lowerCamelCase : Tuple = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__A,__A ):
self.assertListEqual(tokenizer.encode_fast(__A ),__A )
# Test that decode_fast returns the input text
for text, token_ids in zip(__A,__A ):
self.assertEqual(tokenizer.decode_fast(__A ),__A )
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
_lowerCamelCase : List[Any] = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A,model_name="AI-Sweden/gpt-sw3-126m",sequences=__A,) | 11 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = []
for part_id in partition_order:
_lowerCamelCase : Union[str, Any] = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(_lowerCAmelCase ):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCamelCase : Optional[int] = spark.range(100 ).repartition(1 )
_lowerCamelCase : Tuple = Spark(_lowerCAmelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCamelCase : Dict = spark.range(10 ).repartition(2 )
_lowerCamelCase : str = [1, 0]
_lowerCamelCase : int = _generate_iterable_examples(_lowerCAmelCase , _lowerCAmelCase ) # Reverse the partitions.
_lowerCamelCase : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , _lowerCAmelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_lowerCamelCase , _lowerCamelCase : Dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCamelCase : Tuple = spark.range(10 ).repartition(1 )
_lowerCamelCase : int = SparkExamplesIterable(_lowerCAmelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase ):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCamelCase : Optional[int] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
_lowerCamelCase : Tuple = lambda _lowerCAmelCase : x.reverse()
_lowerCamelCase : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [2, 1, 0] )
_lowerCamelCase : Tuple = SparkExamplesIterable(_lowerCAmelCase ).shuffle_data_sources(_lowerCAmelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase ):
_lowerCamelCase , _lowerCamelCase : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCamelCase : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
_lowerCamelCase : Optional[int] = SparkExamplesIterable(_lowerCAmelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
_lowerCamelCase : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase ):
_lowerCamelCase , _lowerCamelCase : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_lowerCamelCase : Dict = SparkExamplesIterable(_lowerCAmelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
_lowerCamelCase : int = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase ):
_lowerCamelCase , _lowerCamelCase : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCamelCase : Dict = spark.range(100 ).repartition(1 )
_lowerCamelCase : List[str] = Spark(_lowerCAmelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100 | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = 'Hello, World!'
UpperCAmelCase_ : Tuple = 'en_XX'
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool ):
"""simple docstring"""
_lowerCamelCase : Dict = Path("data_bin" )
_lowerCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(_lowerCAmelCase ) , bpe="sentencepiece" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
_lowerCamelCase : Tuple = xmod.model.encoder.sentence_encoder
_lowerCamelCase : List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
_lowerCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCamelCase : str = xmod_sent_encoder.embed_tokens.weight
_lowerCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
_lowerCamelCase : Optional[int] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
_lowerCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
_lowerCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCamelCase : Optional[Any] = model.roberta.encoder.layer[i]
_lowerCamelCase : str = xmod_sent_encoder.layers[i]
# self attention
_lowerCamelCase : Any = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
_lowerCamelCase : Tuple = xmod_layer.self_attn.q_proj.weight
_lowerCamelCase : Optional[int] = xmod_layer.self_attn.q_proj.bias
_lowerCamelCase : Tuple = xmod_layer.self_attn.k_proj.weight
_lowerCamelCase : Optional[int] = xmod_layer.self_attn.k_proj.bias
_lowerCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
_lowerCamelCase : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
_lowerCamelCase : Dict = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
_lowerCamelCase : List[Any] = xmod_layer.self_attn.out_proj.weight
_lowerCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.bias
_lowerCamelCase : str = xmod_layer.self_attn_layer_norm.weight
_lowerCamelCase : Tuple = xmod_layer.self_attn_layer_norm.bias
# intermediate
_lowerCamelCase : int = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
_lowerCamelCase : Any = xmod_layer.fca.weight
_lowerCamelCase : Dict = xmod_layer.fca.bias
# output
_lowerCamelCase : Tuple = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
_lowerCamelCase : str = xmod_layer.fca.weight
_lowerCamelCase : Any = xmod_layer.fca.bias
_lowerCamelCase : List[str] = xmod_layer.final_layer_norm.weight
_lowerCamelCase : List[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
_lowerCamelCase : Union[str, Any] = xmod_layer.adapter_layer_norm.weight
_lowerCamelCase : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
_lowerCamelCase : Optional[int] = bert_output.adapter_modules[lang_code]
_lowerCamelCase : List[Any] = xmod_layer.adapter_modules[lang_code]
_lowerCamelCase : int = from_adapter.fca.weight
_lowerCamelCase : Dict = from_adapter.fca.bias
_lowerCamelCase : int = from_adapter.fca.weight
_lowerCamelCase : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
_lowerCamelCase : str = xmod_sent_encoder.layer_norm.weight
_lowerCamelCase : Optional[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
_lowerCamelCase : int = xmod.model.classification_heads["mnli"].dense.weight
_lowerCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
_lowerCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.weight
_lowerCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
_lowerCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
_lowerCamelCase : Any = xmod.model.encoder.lm_head.dense.bias
_lowerCamelCase : Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
_lowerCamelCase : Optional[int] = xmod.model.encoder.lm_head.layer_norm.bias
_lowerCamelCase : Optional[int] = xmod.model.encoder.lm_head.weight
_lowerCamelCase : List[Any] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCamelCase : Any = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
_lowerCamelCase : int = model(_lowerCAmelCase )[0]
if classification_head:
_lowerCamelCase : List[str] = xmod.model.classification_heads["mnli"](xmod.extract_features(_lowerCAmelCase ) )
else:
_lowerCamelCase : str = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
_lowerCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
_lowerCamelCase : Dict = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase_ : str = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 11 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ : Optional[int] = 'src/transformers'
# Matches is_xxx_available()
UpperCAmelCase_ : Optional[Any] = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ : Union[str, Any] = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ : Optional[int] = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
UpperCAmelCase_ : List[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ : str = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ : Tuple = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ : str = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ : Optional[int] = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ : Dict = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
UpperCAmelCase_ : List[str] = re.compile(R'^\s*try:')
# Catches a line with else:
UpperCAmelCase_ : int = re.compile(R'^\s*else:')
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
if _re_test_backend.search(_lowerCAmelCase ) is None:
return None
_lowerCamelCase : Dict = [b[0] for b in _re_backend.findall(_lowerCAmelCase )]
backends.sort()
return "_and_".join(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
with open(_lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCamelCase : int = f.readlines()
_lowerCamelCase : Optional[int] = 0
while line_index < len(_lowerCAmelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_lowerCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_lowerCamelCase : Tuple = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
_lowerCamelCase : int = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_lowerCAmelCase ):
_lowerCamelCase : str = _re_one_line_import_struct.search(_lowerCAmelCase ).groups()[0]
_lowerCamelCase : Optional[Any] = re.findall(r"\[([^\]]+)\]" , _lowerCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
_lowerCamelCase : Tuple = _re_import_struct_key_value.search(_lowerCAmelCase )
if single_line_import_search is not None:
_lowerCamelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
_lowerCamelCase : Union[str, Any] = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowerCamelCase : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase : int = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
_lowerCamelCase : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(_lowerCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_lowerCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_lowerCAmelCase ) is not None:
_lowerCamelCase : str = _re_import_struct_add_many.search(_lowerCAmelCase ).groups()[0].split(", " )
_lowerCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif _re_between_brackets.search(_lowerCAmelCase ) is not None:
_lowerCamelCase : Any = _re_between_brackets.search(_lowerCAmelCase ).groups()[0].split(", " )
_lowerCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif _re_quote_object.search(_lowerCAmelCase ) is not None:
objects.append(_re_quote_object.search(_lowerCAmelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
_lowerCamelCase : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowerCamelCase : List[str] = []
while (
line_index < len(_lowerCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
_lowerCamelCase : List[Any] = lines[line_index]
_lowerCamelCase : Optional[int] = _re_import.search(_lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowerCamelCase : int = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(_lowerCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowerCamelCase : Any = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase : List[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
_lowerCamelCase : Dict = lines[line_index]
_lowerCamelCase : Optional[Any] = _re_import.search(_lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
_lowerCamelCase : Optional[int] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
def find_duplicates(_lowerCAmelCase : List[Any] ):
return [k for k, v in collections.Counter(_lowerCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_lowerCamelCase : str = []
for key in import_dict_objects.keys():
_lowerCamelCase : str = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
_lowerCamelCase : str = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_lowerCamelCase : Union[str, Any] = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = []
for root, _, files in os.walk(_lowerCAmelCase ):
if "__init__.py" in files:
_lowerCamelCase : Tuple = os.path.join(_lowerCAmelCase , "__init__.py" )
_lowerCamelCase : Optional[Any] = parse_init(_lowerCAmelCase )
if objects is not None:
_lowerCamelCase : Union[str, Any] = analyze_results(*_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_lowerCamelCase : Union[str, Any] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) > 0:
raise ValueError("\n\n".join(_lowerCAmelCase ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = []
for path, directories, files in os.walk(_lowerCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(_lowerCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_lowerCAmelCase ) / folder).glob("*.py" ) ) ) == 0:
continue
_lowerCamelCase : Any = str((Path(_lowerCAmelCase ) / folder).relative_to(_lowerCAmelCase ) )
_lowerCamelCase : Optional[Any] = short_path.replace(os.path.sep , "." )
submodules.append(_lowerCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
_lowerCamelCase : Any = str((Path(_lowerCAmelCase ) / fname).relative_to(_lowerCAmelCase ) )
_lowerCamelCase : List[Any] = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(_lowerCAmelCase )
return submodules
UpperCAmelCase_ : int = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def A_ ( ):
"""simple docstring"""
from transformers.utils import direct_transformers_import
_lowerCamelCase : List[Any] = direct_transformers_import(_lowerCAmelCase )
_lowerCamelCase : Tuple = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
with open(os.path.join(_lowerCAmelCase , "__init__.py" ) , "r" ) as f:
_lowerCamelCase : Optional[int] = f.read()
import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]" , _lowerCAmelCase ) ) )
_lowerCamelCase : Tuple = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_lowerCAmelCase ) > 0:
_lowerCamelCase : Optional[Any] = "\n".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules() | 11 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : int = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : Optional[int] = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any],__A : Any,__A : str=True,__A : Optional[int]=True,__A : List[Any]=False,__A : str="[CLS]",__A : int="[SEP]",__A : Dict="<unk>",__A : Union[str, Any]="[SEP]",__A : Dict="<pad>",__A : Optional[int]="[CLS]",__A : Any="[MASK]",__A : Optional[Dict[str, Any]] = None,**__A : Optional[int],):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Dict = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
_lowerCamelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : str = do_lower_case
_lowerCamelCase : int = remove_space
_lowerCamelCase : List[Any] = keep_accents
_lowerCamelCase : int = vocab_file
_lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
@property
def lowerCamelCase_ ( self : Optional[int] ):
return len(self.sp_model )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
_lowerCamelCase : Optional[int] = self.__dict__.copy()
_lowerCamelCase : int = None
return state
def __setstate__( self : List[Any],__A : Dict ):
_lowerCamelCase : Dict = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : Optional[Any],__A : Tuple ):
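        # Normalize the raw text: optionally collapse whitespace, convert `` / '' to
        # straight quotes, strip accents unless keep_accents is set, and lowercase the
        # result when do_lower_case is enabled.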
if self.remove_space:
_lowerCamelCase : List[Any] = " ".join(inputs.strip().split() )
else:
_lowerCamelCase : List[str] = inputs
_lowerCamelCase : str = outputs.replace("``","\"" ).replace("''","\"" )
if not self.keep_accents:
_lowerCamelCase : Tuple = unicodedata.normalize("NFKD",__A )
_lowerCamelCase : Optional[Any] = "".join([c for c in outputs if not unicodedata.combining(__A )] )
if self.do_lower_case:
_lowerCamelCase : Optional[int] = outputs.lower()
return outputs
def lowerCamelCase_ ( self : str,__A : str ):
_lowerCamelCase : Dict = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A,out_type=__A )
_lowerCamelCase : Union[str, Any] = []
for piece in pieces:
if len(__A ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_lowerCamelCase : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__A,"" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
_lowerCamelCase : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__A )
else:
new_pieces.append(__A )
return new_pieces
def lowerCamelCase_ ( self : int,__A : Union[str, Any] ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Union[str, Any],__A : str ):
return self.sp_model.IdToPiece(__A )
def lowerCamelCase_ ( self : List[str],__A : Tuple ):
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Any = True
_lowerCamelCase : List[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : Optional[Any] = False
out_string += self.sp_model.decode(__A )
return out_string.strip()
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None,__A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A,token_ids_a=__A,already_has_special_tokens=__A )
if token_ids_a is not None:
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1]
def lowerCamelCase_ ( self : str,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : str = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,) | 11 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
    if isinstance(_lowerCAmelCase , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(_lowerCAmelCase , str ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def A_ ( _lowerCAmelCase : Sequence[int] | None = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
_lowerCamelCase : List[str] = nums[0]
for i in range(1 , len(_lowerCAmelCase ) ):
_lowerCamelCase : Optional[Any] = nums[i]
_lowerCamelCase : Tuple = max(_lowerCAmelCase , ans + num , _lowerCAmelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : Union[str, Any] = int(input('Enter number of elements : ').strip())
UpperCAmelCase_ : Tuple = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array)) | 11 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
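        # Encode the conditioning image with the CLIP vision model, repeat the embedding
        # once per requested image, and prepend zero embeddings when classifier-free
        # guidance is enabled.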
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
print()
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase_ : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase_ : List[str] = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir,"models/bert/" ) )
_lowerCamelCase : Any = self.transformer_dir
shutil.copy(
os.path.join(__A,"src/transformers/models/bert/modeling_bert.py" ),os.path.join(self.transformer_dir,"models/bert/modeling_bert.py" ),)
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = "src/transformers"
shutil.rmtree(self.transformer_dir )
def lowerCamelCase_ ( self : Any,__A : Dict,__A : List[Any],__A : Union[str, Any],__A : Any=None ):
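        # Write `class_code` under the given "# Copied from ..." comment to a temporary
        # module, then check that the copy is reported consistent, or that the automatic
        # overwrite produces `overwrite_result` when it is provided.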
_lowerCamelCase : List[Any] = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
_lowerCamelCase : Union[str, Any] = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
_lowerCamelCase : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa},line_length=1_1_9 )
_lowerCamelCase : Union[str, Any] = black.format_str(__A,mode=__A )
_lowerCamelCase : Dict = os.path.join(self.transformer_dir,"new_code.py" )
with open(__A,"w",newline="\n" ) as f:
f.write(__A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name,overwrite=__A )
with open(__A,"r" ) as f:
self.assertTrue(f.read(),__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Dict ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead","BertLMPredictionHead",REFERENCE_CODE + "\n",)
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead","BertLMPredictionHead",__A,)
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel","TestModelLMPredictionHead",re.sub("Bert","TestModel",__A ),)
# Copy consistency with a really long name
_lowerCamelCase : Tuple = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}',f'{long_class_name}LMPredictionHead',re.sub("Bert",__A,__A ),)
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel","TestModelLMPredictionHead",__A,overwrite_result=re.sub("Bert","TestModel",__A ),)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
_lowerCamelCase : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
_lowerCamelCase : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCamelCase : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
_lowerCamelCase , _lowerCamelCase : Any = check_copies.convert_to_localized_md(
__A,__A,localized_readme["format_model_list"] )
self.assertFalse(__A )
self.assertEqual(__A,__A )
_lowerCamelCase , _lowerCamelCase : Tuple = check_copies.convert_to_localized_md(
__A,__A,localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__A )
_lowerCamelCase : Tuple = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
_lowerCamelCase : Tuple = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCamelCase : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCamelCase , _lowerCamelCase : List[Any] = check_copies.convert_to_localized_md(
__A,__A,localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(__A,__A ) | 11 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase__ ( A ):
def lowerCamelCase_ ( self : List[Any],__A : str ):
with open(__A,encoding="utf-8" ) as input_file:
_lowerCamelCase : List[str] = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
_lowerCamelCase : List[Any] = input_file.read()
_lowerCamelCase : Any = regexp.search(__A )
return match
def lowerCamelCase_ ( self : str,__A : str ):
with open(__A,encoding="utf-8" ) as input_file:
_lowerCamelCase : str = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()",re.DOTALL )
_lowerCamelCase : Tuple = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_lowerCamelCase : int = regexp.finditer(__A )
_lowerCamelCase : Tuple = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Tuple = Path("./datasets" )
_lowerCamelCase : List[Any] = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__A ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = Path("./datasets" )
_lowerCamelCase : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__A ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' ) | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(A ) , 'Tatoeba directory does not exist.' )
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Tuple = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__A )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
self.resolver.convert_models(["heb-eng"] )
@slow
def lowerCamelCase_ ( self : str ):
_lowerCamelCase , _lowerCamelCase : List[Any] = self.resolver.write_model_card("opus-mt-he-en",dry_run=__A )
assert mmeta["long_pair"] == "heb-eng" | 11 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
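# "Fast" BlenderbotSmall tokenizer backed by a byte-level BPE model: it wraps the inputs with
# BOS/EOS ids and returns all-zero token type ids.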
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 1 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
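# Keeps an exponential moving average (EMA) of model parameters in `shadow_params`. The decay can
# warm up over training (see `get_decay`), and the averaged weights can be copied into a model,
# saved/loaded, or temporarily stored and restored around evaluation.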
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = FunnelTokenizer
lowerCAmelCase_ = FunnelTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def lowerCamelCase_ ( self : Optional[int] ):
super().setUp()
_lowerCamelCase : str = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCamelCase_ ( self : List[str],**__A : str ):
return FunnelTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,**__A : Optional[int] ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Optional[Any],__A : str ):
_lowerCamelCase : List[Any] = "UNwant\u00E9d,running"
_lowerCamelCase : Optional[int] = "unwanted, running"
return input_text, output_text
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : str = self.tokenizer_class(self.vocab_file )
_lowerCamelCase : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__A,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),[7, 4, 5, 1_0, 8, 9] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
_lowerCamelCase : Union[str, Any] = tokenizer("UNwant\u00E9d,running" )
_lowerCamelCase : str = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"],[2] + [0] * sentence_len )
_lowerCamelCase : int = tokenizer("UNwant\u00E9d,running","UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"],[2] + [0] * sentence_len + [1] * sentence_len ) | 11 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
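# Load a fairseq/metaseq OPT checkpoint, drop unused keys, rename projection/layer-norm keys to the
# Hugging Face layout, and split the fused qkv projection into separate q/k/v weights.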
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused QKV weight in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 1 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
    # Reject float and str inputs explicitly so the error matches Python's own message.
    if isinstance(_lowerCAmelCase , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(_lowerCAmelCase , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
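# Map a parameter name from the original GroupViT checkpoint to the corresponding
# Hugging Face GroupViTModel parameter name.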
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
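# Rewrite the whole checkpoint: split fused qkv / in_proj attention weights into separate
# q/k/v matrices and rename every remaining key via the helper above.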
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 1 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 |
'''simple docstring'''
from __future__ import annotations
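# The three functions below implement bitonic sort: a compare-and-swap of one pair of elements,
# a merge of a bitonic run, and the recursive sort that builds ascending/descending halves
# before merging them (direction 1 = ascending, 0 = descending).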
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
UpperCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
UpperCAmelCase_ : Tuple = '</w>'
UpperCAmelCase_ : Optional[Any] = '@@ '
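# Return the set of adjacent symbol pairs in a word, as used by the BPE merge loop below.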
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = set()
_lowerCamelCase : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCamelCase : str = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase_ : Union[str, Any] = {'facebook/s2t-wav2vec2-large-en-de': 1024}
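# Speech2Text2 decoder-side tokenizer: a JSON vocab plus optional BPE merges. Without a merges
# file it can only decode; encoding then raises a ValueError.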
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Any,__A : Optional[Any],__A : List[Any]="<s>",__A : Tuple="<pad>",__A : Optional[int]="</s>",__A : Dict="<unk>",__A : Union[str, Any]=False,__A : str=None,**__A : Optional[Any],):
super().__init__(
unk_token=__A,bos_token=__A,eos_token=__A,pad_token=__A,do_lower_case=__A,**__A,)
_lowerCamelCase : int = do_lower_case
with open(__A,encoding="utf-8" ) as vocab_handle:
_lowerCamelCase : List[Any] = json.load(__A )
_lowerCamelCase : str = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.' )
_lowerCamelCase : List[Any] = None
_lowerCamelCase : str = None
else:
with open(__A,encoding="utf-8" ) as merges_handle:
_lowerCamelCase : Optional[Any] = merges_handle.read().split("\n" )[:-1]
_lowerCamelCase : List[Any] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCamelCase : List[Any] = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Tuple = {}
@property
def lowerCamelCase_ ( self : Optional[int] ):
return len(self.decoder )
def lowerCamelCase_ ( self : Any ):
return dict(self.encoder,**self.added_tokens_encoder )
def lowerCamelCase_ ( self : int,__A : List[Any] ):
_lowerCamelCase : Tuple = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCamelCase : Optional[Any] = get_pairs(__A )
if not pairs:
return token
while True:
_lowerCamelCase : Any = min(__A,key=lambda __A : self.bpe_ranks.get(__A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase , _lowerCamelCase : Optional[Any] = bigram
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : Any = 0
while i < len(__A ):
try:
_lowerCamelCase : Tuple = word.index(__A,__A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase : Union[str, Any] = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase : List[str] = tuple(__A )
_lowerCamelCase : Dict = new_word
if len(__A ) == 1:
break
else:
_lowerCamelCase : List[Any] = get_pairs(__A )
_lowerCamelCase : Dict = " ".join(__A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCamelCase : Any = "\n" + BPE_TOKEN_MERGES
if word.endswith(__A ):
_lowerCamelCase : List[str] = word.replace(__A,"" )
_lowerCamelCase : List[Any] = word.replace(" ",__A )
_lowerCamelCase : Tuple = word
return word
def lowerCamelCase_ ( self : Any,__A : Optional[int] ):
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
_lowerCamelCase : List[Any] = text.lower()
_lowerCamelCase : Optional[int] = text.split()
_lowerCamelCase : Optional[int] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__A ).split(" " ) ) )
return split_tokens
def lowerCamelCase_ ( self : int,__A : str ):
return self.encoder.get(__A,self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : List[str],__A : int ):
_lowerCamelCase : List[Any] = self.decoder.get(__A,self.unk_token )
return result
def lowerCamelCase_ ( self : int,__A : List[str] ):
_lowerCamelCase : List[str] = " ".join(__A )
# make sure @@ tokens are concatenated
_lowerCamelCase : int = "".join(string.split(__A ) )
return string
def lowerCamelCase_ ( self : Tuple,__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Any = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=__A,ensure_ascii=__A ) + "\n" )
_lowerCamelCase : Dict = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__A,"w",encoding="utf-8" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
_lowerCamelCase : Any = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return (vocab_file, merges_file) | 11 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = (DDPMScheduler,)
def lowerCamelCase_ ( self : str,**__A : List[str] ):
_lowerCamelCase : List[str] = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__A )
return config
def lowerCamelCase_ ( self : str ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__A )
def lowerCamelCase_ ( self : Tuple ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1],[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__A,beta_end=__A )
def lowerCamelCase_ ( self : Any ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__A )
def lowerCamelCase_ ( self : Dict ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__A )
def lowerCamelCase_ ( self : int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def lowerCamelCase_ ( self : Tuple ):
self.check_over_configs(thresholding=__A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__A,prediction_type=__A,sample_max_value=__A,)
def lowerCamelCase_ ( self : List[str] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def lowerCamelCase_ ( self : int ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Union[str, Any] = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Optional[int] = scheduler_class(**__A )
_lowerCamelCase : List[Any] = len(__A )
_lowerCamelCase : Union[str, Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : Tuple = torch.manual_seed(0 )
for t in reversed(range(__A ) ):
# 1. predict noise residual
_lowerCamelCase : int = model(__A,__A )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : int = scheduler.step(__A,__A,__A,generator=__A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : Dict = pred_prev_sample
_lowerCamelCase : Dict = torch.sum(torch.abs(__A ) )
_lowerCamelCase : Tuple = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = scheduler_class(**__A )
_lowerCamelCase : List[str] = len(__A )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(__A ) ):
# 1. predict noise residual
_lowerCamelCase : str = model(__A,__A )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[str] = scheduler.step(__A,__A,__A,generator=__A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : str = pred_prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(__A ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**__A )
_lowerCamelCase : Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=__A )
_lowerCamelCase : List[str] = scheduler.timesteps
for i, timestep in enumerate(__A ):
if i == len(__A ) - 1:
_lowerCamelCase : Union[str, Any] = -1
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
_lowerCamelCase : Optional[int] = scheduler.previous_timestep(__A )
_lowerCamelCase : int = prev_t.item()
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : List[Any] = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**__A )
_lowerCamelCase : Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(__A,msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__A )
_lowerCamelCase : Tuple = [1_0_0, 8_7, 5_0, 1, 0]
_lowerCamelCase : Dict = len(__A )
with self.assertRaises(__A,msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__A,timesteps=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Tuple = self.get_scheduler_config()
_lowerCamelCase : Optional[int] = scheduler_class(**__A )
_lowerCamelCase : int = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __A,msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}',):
scheduler.set_timesteps(timesteps=__A ) | 11 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
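# Processor pairing an AutoTokenizer with optional speaker-embedding voice presets
# (semantic/coarse/fine prompts, the layout used by Bark-style text-to-speech models).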
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'AutoTokenizer'
lowerCAmelCase_ = ['tokenizer']
lowerCAmelCase_ = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self : str,__A : int,__A : int=None ):
super().__init__(__A )
_lowerCamelCase : Optional[Any] = speaker_embeddings
@classmethod
def lowerCamelCase_ ( cls : List[Any],__A : List[Any],__A : Optional[Any]="speaker_embeddings_path.json",**__A : Union[str, Any] ):
if speaker_embeddings_dict_path is not None:
_lowerCamelCase : int = get_file_from_repo(
__A,__A,subfolder=kwargs.pop("subfolder",__A ),cache_dir=kwargs.pop("cache_dir",__A ),force_download=kwargs.pop("force_download",__A ),proxies=kwargs.pop("proxies",__A ),resume_download=kwargs.pop("resume_download",__A ),local_files_only=kwargs.pop("local_files_only",__A ),use_auth_token=kwargs.pop("use_auth_token",__A ),revision=kwargs.pop("revision",__A ),)
if speaker_embeddings_path is None:
logger.warning(
                    f'`{os.path.join(__A,__A )}` does not exist, no preloaded speaker embeddings will be used - make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
_lowerCamelCase : Dict = None
else:
with open(__A ) as speaker_embeddings_json:
_lowerCamelCase : Union[str, Any] = json.load(__A )
else:
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Any = AutoTokenizer.from_pretrained(__A,**__A )
return cls(tokenizer=__A,speaker_embeddings=__A )
def lowerCamelCase_ ( self : Optional[Any],__A : List[str],__A : Optional[Any]="speaker_embeddings_path.json",__A : int="speaker_embeddings",__A : bool = False,**__A : Tuple,):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__A,__A,"v2" ),exist_ok=__A )
_lowerCamelCase : Dict = {}
_lowerCamelCase : Any = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCamelCase : Union[str, Any] = self._load_voice_preset(__A )
_lowerCamelCase : List[str] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"],__A,f'{prompt_key}_{key}' ),voice_preset[key],allow_pickle=__A,)
_lowerCamelCase : List[str] = os.path.join(__A,f'{prompt_key}_{key}.npy' )
_lowerCamelCase : Any = tmp_dict
with open(os.path.join(__A,__A ),"w" ) as fp:
json.dump(__A,__A )
super().save_pretrained(__A,__A,**__A )
def lowerCamelCase_ ( self : Any,__A : str = None,**__A : Optional[int] ):
_lowerCamelCase : Optional[int] = self.speaker_embeddings[voice_preset]
_lowerCamelCase : int = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
_lowerCamelCase : List[str] = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path","/" ),voice_preset_paths[key],subfolder=kwargs.pop("subfolder",__A ),cache_dir=kwargs.pop("cache_dir",__A ),force_download=kwargs.pop("force_download",__A ),proxies=kwargs.pop("proxies",__A ),resume_download=kwargs.pop("resume_download",__A ),local_files_only=kwargs.pop("local_files_only",__A ),use_auth_token=kwargs.pop("use_auth_token",__A ),revision=kwargs.pop("revision",__A ),)
if path is None:
raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path","/" ),voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - make sure to provide correct paths to the {voice_preset} embeddings.' )
_lowerCamelCase : int = np.load(__A )
return voice_preset_dict
def lowerCamelCase_ ( self : Any,__A : Optional[dict] = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key],np.ndarray ):
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self : Tuple,__A : List[Any]=None,__A : Optional[int]=None,__A : str="pt",__A : Tuple=2_5_6,__A : Dict=False,__A : List[Any]=True,__A : Dict=False,**__A : Tuple,):
if voice_preset is not None and not isinstance(__A,__A ):
if (
isinstance(__A,__A )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_lowerCamelCase : Optional[Any] = self._load_voice_preset(__A )
else:
if isinstance(__A,__A ) and not voice_preset.endswith(".npz" ):
_lowerCamelCase : List[str] = voice_preset + ".npz"
_lowerCamelCase : List[Any] = np.load(__A )
if voice_preset is not None:
self._validate_voice_preset_dict(__A,**__A )
_lowerCamelCase : List[str] = BatchFeature(data=__A,tensor_type=__A )
_lowerCamelCase : Dict = self.tokenizer(
__A,return_tensors=__A,padding="max_length",max_length=__A,return_attention_mask=__A,return_token_type_ids=__A,add_special_tokens=__A,**__A,)
if voice_preset is not None:
_lowerCamelCase : Any = voice_preset
return encoded_text | 11 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
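            # Rebuild the post-processor only if its add_prefix_space / trim_offsets settings differ from the requested ones.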
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
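        # Blenderbot only appends the EOS token to a single sequence; a second sequence is not given special handling here.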
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = 11
_lowerCamelCase : str = int("1" + "0" * digit_len )
for num in range(_lowerCAmelCase , _lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCAmelCase , _lowerCAmelCase ):
solutions.append(F'{num}/{den}' )
den += 1
num += 1
_lowerCamelCase : Any = 10
return solutions
def A_ ( _lowerCAmelCase : int = 2 ):
"""simple docstring"""
_lowerCamelCase : int = 1.0
for fraction in fraction_list(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = Fraction(_lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCAmelCase )
if __name__ == "__main__":
print(solution()) | 11 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1000 ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : Dict = 0
for divide_by_number in range(_lowerCAmelCase , digit + 1 ):
_lowerCamelCase : list[int] = []
_lowerCamelCase : Tuple = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(_lowerCAmelCase ):
_lowerCamelCase : Any = len(_lowerCAmelCase )
_lowerCamelCase : Dict = divide_by_number
else:
has_been_divided.append(_lowerCAmelCase )
_lowerCamelCase : int = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase__ :
def __init__( self : Tuple,__A : int ):
_lowerCamelCase : Union[str, Any] = num_of_nodes
_lowerCamelCase : list[list[int]] = []
_lowerCamelCase : dict[int, int] = {}
def lowerCamelCase_ ( self : str,__A : int,__A : int,__A : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowerCamelCase_ ( self : int,__A : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCamelCase_ ( self : Union[str, Any],__A : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCamelCase : Optional[int] = self.find_component(__A )
def lowerCamelCase_ ( self : List[Any],__A : list[int],__A : int,__A : int ):
if component_size[u_node] <= component_size[v_node]:
_lowerCamelCase : Tuple = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCamelCase : Optional[int] = self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCamelCase : List[Any] = self.m_num_of_nodes
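        # Boruvka's algorithm: repeatedly add the cheapest edge leaving each component until a single component remains.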
while num_of_components > 1:
for edge in self.m_edges:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = edge
_lowerCamelCase : List[str] = self.m_component[u]
_lowerCamelCase : Dict = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCamelCase : Optional[int] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A,__A ):
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = edge
_lowerCamelCase : Optional[Any] = self.m_component[u]
_lowerCamelCase : Dict = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A,__A,__A )
print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
_lowerCamelCase : Tuple = [-1] * self.m_num_of_nodes
print(f'The total weight of the minimal spanning tree is: {mst_weight}' )
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
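        # Models without token_type_ids (e.g. RoBERTa, DistilBERT) must not receive that field.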
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
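        # Token type ids for ALBERT: 0 for the first sequence (including [CLS] and [SEP]) and 1 for the second sequence.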
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
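        # Convenience encoder: normalize the text, encode it with SentencePiece and optionally return the ids as a torch tensor.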
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 1 |
'''simple docstring'''
import random
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : dict = {i: [] for i in range(_lowerCAmelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_lowerCAmelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_lowerCAmelCase ):
for j in range(i + 1 , _lowerCAmelCase ):
if random.random() < probability:
graph[i].append(_lowerCAmelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_lowerCAmelCase )
return graph
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
return {
i: [j for j in range(_lowerCAmelCase ) if i != j] for i in range(_lowerCAmelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
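        # Prepend the items in descending order so the resulting list reads in ascending order from the head.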
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
if len(_lowerCAmelCase ) == 0:
return []
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = min(_lowerCAmelCase ), max(_lowerCAmelCase )
_lowerCamelCase : int = int(max_value - min_value ) + 1
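    # One bucket per integer step between the minimum and maximum value; each bucket is sorted individually before concatenation.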
_lowerCamelCase : list[list] = [[] for _ in range(_lowerCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(_lowerCAmelCase )
return [v for bucket in buckets for v in sorted(_lowerCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15] | 11 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 1 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = self.task_name.lower()
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
lowerCAmelCase_ = 'test'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Any,__A : GlueDataTrainingArguments,__A : PreTrainedTokenizerBase,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[str] = None,):
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",__A,)
_lowerCamelCase : int = args
_lowerCamelCase : int = glue_processors[args.task_name]()
_lowerCamelCase : int = glue_output_modes[args.task_name]
if isinstance(__A,__A ):
try:
_lowerCamelCase : Optional[Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_lowerCamelCase : Optional[int] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}',)
_lowerCamelCase : Tuple = self.processor.get_labels()
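        # For MNLI with RoBERTa-style checkpoints the pretrained label indices differ, so two labels are swapped below.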
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowerCamelCase , _lowerCamelCase : Dict = label_list[2], label_list[1]
_lowerCamelCase : Dict = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Dict = torch.load(__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
else:
logger.info(f'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
_lowerCamelCase : Any = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_lowerCamelCase : Optional[int] = self.processor.get_test_examples(args.data_dir )
else:
_lowerCamelCase : int = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_lowerCamelCase : Union[str, Any] = examples[:limit_length]
_lowerCamelCase : List[str] = glue_convert_examples_to_features(
__A,__A,max_length=args.max_seq_length,label_list=__A,output_mode=self.output_mode,)
_lowerCamelCase : int = time.time()
torch.save(self.features,__A )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : str ):
return len(self.features )
def __getitem__( self : str,__A : str ):
return self.features[i]
def lowerCamelCase_ ( self : Dict ):
return self.label_list | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
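        # The last hidden state for this 11-token input is expected to have shape (1, 11, 768).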
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
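# Lazy import structure: the heavy vision and torch submodules are only imported when they are actually accessed.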
UpperCAmelCase_ : Tuple = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : Union[str, Any] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
        # If adding this weight would push the current shard over the maximal size, we start a new shard.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : str = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
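    # For each candidate size M, count cuboids a x b x M (with a + b = sum_shortest_sides) whose unfolded shortest path sqrt((a + b) ** 2 + M ** 2) is an integer.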
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCAmelCase_ : Dict = random.Random()
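# Module-level RNG used to generate the synthetic audio inputs for these tests.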
if is_torch_available():
import torch
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any=1.0 , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=None ):
"""simple docstring"""
if rng is None:
_lowerCamelCase : List[str] = global_rng
_lowerCamelCase : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict,__A : List[str],__A : Union[str, Any]=7,__A : Union[str, Any]=4_0_0,__A : List[Any]=2_0_0_0,__A : Any=1,__A : Dict=0.0,__A : Optional[int]=1_6_0_0_0,__A : int=True,__A : List[str]=True,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : List[str] = min_seq_length
_lowerCamelCase : Any = max_seq_length
_lowerCamelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : List[str] = feature_size
_lowerCamelCase : Any = padding_value
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : int = return_attention_mask
_lowerCamelCase : int = do_normalize
def lowerCamelCase_ ( self : Dict ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self : List[Any],__A : Any=False,__A : Optional[int]=False ):
def _flatten(__A : Optional[int] ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
_lowerCamelCase : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCamelCase : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff )
]
if numpify:
_lowerCamelCase : Optional[Any] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( _snake_case , unittest.TestCase ):
lowerCAmelCase_ = ASTFeatureExtractor
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Dict = ASTFeatureExtractionTester(self )
def lowerCamelCase_ ( self : Any ):
        # Test that all call paths wrap encode_plus and batch_encode_plus
_lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0,1_4_0_0,2_0_0 )]
_lowerCamelCase : Optional[Any] = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_lowerCamelCase : Dict = feat_extract(speech_inputs[0],return_tensors="np" ).input_values
_lowerCamelCase : int = feat_extract(np_speech_inputs[0],return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__,lowerCAmelCase__,atol=1e-3 ) )
# Test batched
_lowerCamelCase : List[Any] = feat_extract(lowerCAmelCase__,padding=lowerCAmelCase__,return_tensors="np" ).input_values
_lowerCamelCase : int = feat_extract(lowerCAmelCase__,padding=lowerCAmelCase__,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__,lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__,lowerCAmelCase__,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase : Optional[Any] = np.asarray(lowerCAmelCase__ )
_lowerCamelCase : Optional[int] = feat_extract(lowerCAmelCase__,return_tensors="np" ).input_values
_lowerCamelCase : Union[str, Any] = feat_extract(lowerCAmelCase__,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__,lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__,lowerCAmelCase__,atol=1e-3 ) )
@require_torch
def lowerCamelCase_ ( self : int ):
import torch
_lowerCamelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Dict = np.random.rand(1_0_0 ).astype(np.floataa )
_lowerCamelCase : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase : Dict = feature_extractor.pad([{"input_values": inputs}],return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCamelCase : int = feature_extractor.pad([{"input_values": inputs}],return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCamelCase_ ( self : Tuple,__A : List[Any] ):
from datasets import load_dataset
_lowerCamelCase : int = load_dataset("hf-internal-testing/librispeech_asr_dummy","clean",split="validation" )
# automatic decoding with librispeech
_lowerCamelCase : str = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def lowerCamelCase_ ( self : Any ):
# fmt: off
_lowerCamelCase : Union[str, Any] = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
_lowerCamelCase : Tuple = self._load_datasamples(1 )
_lowerCamelCase : Union[str, Any] = ASTFeatureExtractor()
_lowerCamelCase : Tuple = feature_extractor(lowerCAmelCase__,return_tensors="pt" ).input_values
self.assertEquals(input_values.shape,(1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0],lowerCAmelCase__,atol=1e-4 ) ) | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
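    # Repeatedly take the lowest bit (num % 2) and shift right, inserting at the front so the bits come out most-significant first.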
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase_ : List[Any] = 100
UpperCAmelCase_ : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase_ : int
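# Simple sieve: strike out multiples of each prime so that `primes` ends up holding only the primes below NUM_PRIMES.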
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_lowerCamelCase : List[Any] = set()
_lowerCamelCase : List[Any] = 42
_lowerCamelCase : List[str] = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def A_ ( _lowerCAmelCase : Union[str, Any] = 5000 ):
"""simple docstring"""
for number_to_partition in range(1 , _SCREAMING_SNAKE_CASE ):
if len(partition(_SCREAMING_SNAKE_CASE ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
        # YiYi's note: for testing only; to match ldm we can directly create latents with the desired shape (batch_size, num_embeddings, embedding_dim)
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
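        # Denoising loop: at each timestep the prior predicts the image embedding and the scheduler takes one step.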
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
import numpy as np
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : int = int(np.ceil((x_end - xa) / h ) )
_lowerCamelCase : str = np.zeros((n + 1,) )
_lowerCamelCase : Tuple = ya
_lowerCamelCase : Tuple = xa
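    # Classic RK4 step: four slope evaluations per step, combined with weights 1/6, 1/3, 1/3, 1/6.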
for k in range(_lowerCAmelCase ):
_lowerCamelCase : Any = f(_lowerCAmelCase , y[k] )
_lowerCamelCase : Union[str, Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
_lowerCamelCase : int = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
_lowerCamelCase : Optional[int] = f(x + h , y[k] + h * ka )
_lowerCamelCase : Optional[int] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = min(_lowercase ) # min() finds the minimum value
_lowerCamelCase : List[str] = max(_lowercase ) # max() finds the maximum value
_lowerCamelCase : Dict = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
_lowerCamelCase : Optional[int] = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(_lowercase , _lowercase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
_lowerCamelCase : int = 0
for count in range(_lowercase ):
while holes[count] > 0:
holes[count] -= 1
_lowerCamelCase : List[str] = count + min_val
i += 1
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Dict = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(_lowercase )
print("Sorted order is:" , " ".join(_lowercase ) )
if __name__ == "__main__":
main() | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
) | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
def __init__( self : Union[str, Any],__A : Dict,__A : Optional[int]=1_3,__A : Tuple=3_0,__A : Dict=2,__A : Dict=3,__A : Tuple=True,__A : str=True,__A : Tuple=3_2,__A : List[str]=5,__A : str=4,__A : Optional[int]=3_7,__A : str="gelu",__A : List[str]=0.1,__A : Dict=0.1,__A : Dict=1_0,__A : Optional[int]=0.02,__A : Union[str, Any]=3,__A : Any=0.6,__A : Dict=None,):
_lowerCamelCase : Tuple = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Optional[Any] = image_size
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Union[str, Any] = is_training
_lowerCamelCase : int = use_labels
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : Tuple = attention_probs_dropout_prob
_lowerCamelCase : Any = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Optional[int] = mask_ratio
_lowerCamelCase : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : Dict = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : List[Any] ):
return ViTMAEConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=UpperCAmelCase_,initializer_range=self.initializer_range,mask_ratio=self.mask_ratio,)
def lowerCamelCase_ ( self : Tuple,__A : int,__A : str,__A : Optional[int] ):
_lowerCamelCase : Dict = ViTMAEModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCamelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Union[str, Any],__A : Union[str, Any],__A : Union[str, Any],__A : Tuple ):
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCamelCase : Dict = model(UpperCAmelCase_ )
_lowerCamelCase : List[str] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(UpperCAmelCase_ )
_lowerCamelCase : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape,(self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = config_and_inputs
_lowerCamelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase_ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[str] = ViTMAEModelTester(self )
_lowerCamelCase : Any = ConfigTester(self,config_class=UpperCAmelCase_,has_text_modality=UpperCAmelCase_,hidden_size=3_7 )
def lowerCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCamelCase_ ( self : Tuple ):
pass
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_,nn.Linear ) )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Tuple = model_class(UpperCAmelCase_ )
_lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Any = [*signature.parameters.keys()]
_lowerCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1],UpperCAmelCase_ )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ )
def lowerCamelCase_ ( self : Union[str, Any],__A : str,__A : Any,__A : Union[str, Any] ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Optional[Any] = torch.from_numpy(UpperCAmelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_,UpperCAmelCase_,UpperCAmelCase_ )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Tuple = model(**self._prepare_for_class(UpperCAmelCase_,UpperCAmelCase_ ) )
_lowerCamelCase : Dict = outputs[0].cpu().numpy()
_lowerCamelCase : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_ )
_lowerCamelCase : str = model_class.from_pretrained(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**self._prepare_for_class(UpperCAmelCase_,UpperCAmelCase_ ) )
# Make sure we don't have nans
_lowerCamelCase : Tuple = after_outputs[0].cpu().numpy()
_lowerCamelCase : Any = 0
_lowerCamelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase_,1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : int ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(UpperCAmelCase_ )
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : Optional[int] = image_processor(images=UpperCAmelCase_,return_tensors="pt" ).to(UpperCAmelCase_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Union[str, Any] = ViTMAEConfig()
_lowerCamelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**UpperCAmelCase_,noise=torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ ) )
# verify the logits
_lowerCamelCase : Tuple = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape,UpperCAmelCase_ )
_lowerCamelCase : Any = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3],expected_slice.to(UpperCAmelCase_ ),atol=1e-4 ) ) | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
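        # With warmup the decay ramps up as 1 - (1 + step / inv_gamma) ** -power; otherwise use (1 + step) / (10 + step).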
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def A_ ( _lowerCAmelCase : int = 100 ):
"""simple docstring"""
_lowerCamelCase : Dict = 1
_lowerCamelCase : Optional[int] = 2
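    # Partial denominators of e's continued fraction [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third term is 2k/3, the rest are 1.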
for i in range(2 , max_n + 1 ):
_lowerCamelCase : Any = pre_numerator
_lowerCamelCase : int = 2 * i // 3 if i % 3 == 0 else 1
_lowerCamelCase : Optional[Any] = cur_numerator
_lowerCamelCase : int = e_cont * pre_numerator + temp
return sum_digits(_lowerCAmelCase )
if __name__ == "__main__":
print(f'''{solution() = }''') | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
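    # Remap the remaining fairseq parameter names to the Hugging Face OPT naming scheme.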
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight split as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
    # Save the converted model
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase__ :
@staticmethod
def lowerCamelCase_ ( *__A : Union[str, Any],**__A : Optional[int] ):
pass
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
UpperCAmelCase_ : Dict = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self : Union[str, Any],__A : Dict,__A : str,__A : List[str] ):
_lowerCamelCase : Dict = pipeline(
"document-question-answering",model=__A,tokenizer=__A,image_processor=__A )
_lowerCamelCase : Optional[Any] = INVOICE_URL
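        # Run OCR once so the same words/boxes can also be passed explicitly in the third example below.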
_lowerCamelCase : Any = list(zip(*apply_tesseract(load_image(__A ),__A,"" ) ) )
_lowerCamelCase : Dict = '''What is the placebo?'''
_lowerCamelCase : Optional[Any] = [
{
'''image''': load_image(__A ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCamelCase_ ( self : Any,__A : Optional[int],__A : int ):
_lowerCamelCase : Dict = dqa_pipeline(__A,top_k=2 )
self.assertEqual(
__A,[
[
{"score": ANY(__A ), "answer": ANY(__A ), "start": ANY(__A ), "end": ANY(__A )},
{"score": ANY(__A ), "answer": ANY(__A ), "start": ANY(__A ), "end": ANY(__A )},
]
]
* 3,)
@require_torch
    @require_detectron2
@require_pytesseract
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = pipeline("document-question-answering",model="hf-internal-testing/tiny-random-layoutlmv2" )
_lowerCamelCase : Union[str, Any] = INVOICE_URL
_lowerCamelCase : Tuple = '''How many cats are there?'''
_lowerCamelCase : Optional[int] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 3_8, '''end''': 3_9},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 3_8, '''end''': 4_0},
]
_lowerCamelCase : Optional[Any] = dqa_pipeline(image=__A,question=__A,top_k=2 )
self.assertEqual(nested_simplify(__A,decimals=4 ),__A )
_lowerCamelCase : List[str] = dqa_pipeline({"image": image, "question": question},top_k=2 )
self.assertEqual(nested_simplify(__A,decimals=4 ),__A )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
_lowerCamelCase : Optional[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_lowerCamelCase : Any = dqa_pipeline(image=__A,question=__A,top_k=2 )
self.assertEqual(__A,[] )
# We can optionnally pass directly the words and bounding boxes
_lowerCamelCase : int = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_lowerCamelCase : Dict = []
_lowerCamelCase : str = []
_lowerCamelCase : str = dqa_pipeline(image=__A,question=__A,words=__A,boxes=__A,top_k=2 )
self.assertEqual(__A,[] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Union[str, Any] = pipeline(
"document-question-answering",model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",revision="9977165",)
_lowerCamelCase : Dict = INVOICE_URL
_lowerCamelCase : str = '''What is the invoice number?'''
_lowerCamelCase : List[str] = dqa_pipeline(image=__A,question=__A,top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.9944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0009, "answer": "us-001", "start": 1_6, "end": 1_6},
],)
_lowerCamelCase : List[Any] = dqa_pipeline({"image": image, "question": question},top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.9944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0009, "answer": "us-001", "start": 1_6, "end": 1_6},
],)
_lowerCamelCase : List[Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}],top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
[
{"score": 0.9944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0009, "answer": "us-001", "start": 1_6, "end": 1_6},
],
]
* 2,)
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = pipeline(
"document-question-answering",model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",revision="9977165",max_seq_len=5_0,)
_lowerCamelCase : Dict = INVOICE_URL
_lowerCamelCase : Any = '''What is the invoice number?'''
_lowerCamelCase : Union[str, Any] = dqa_pipeline(image=__A,question=__A,top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.9974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9948, "answer": "us-001", "start": 1_6, "end": 1_6},
],)
_lowerCamelCase : int = dqa_pipeline({"image": image, "question": question},top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.9974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9948, "answer": "us-001", "start": 1_6, "end": 1_6},
],)
_lowerCamelCase : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}],top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
[
{"score": 0.9974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9948, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2,)
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa",revision="3dc6de3",add_prefix_space=__A )
_lowerCamelCase : Tuple = pipeline(
"document-question-answering",model="impira/layoutlm-document-qa",tokenizer=__A,revision="3dc6de3",)
_lowerCamelCase : Tuple = INVOICE_URL
_lowerCamelCase : Any = '''What is the invoice number?'''
_lowerCamelCase : Dict = dqa_pipeline(image=__A,question=__A,top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.4251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0819, "answer": "1110212019", "start": 2_3, "end": 2_3},
],)
_lowerCamelCase : Any = dqa_pipeline({"image": image, "question": question},top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.4251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0819, "answer": "1110212019", "start": 2_3, "end": 2_3},
],)
_lowerCamelCase : str = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}],top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
[
{"score": 0.4251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0819, "answer": "1110212019", "start": 2_3, "end": 2_3},
]
]
* 2,)
_lowerCamelCase : Tuple = list(zip(*apply_tesseract(load_image(__A ),__A,"" ) ) )
# This model should also work if `image` is set to None
_lowerCamelCase : Dict = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question},top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.4251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0819, "answer": "1110212019", "start": 2_3, "end": 2_3},
],)
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa",revision="3dc6de3",add_prefix_space=__A )
_lowerCamelCase : List[Any] = pipeline(
"document-question-answering",model="impira/layoutlm-document-qa",tokenizer=__A,revision="3dc6de3",max_seq_len=5_0,)
_lowerCamelCase : str = INVOICE_URL
_lowerCamelCase : int = '''What is the invoice number?'''
_lowerCamelCase : Tuple = dqa_pipeline(image=__A,question=__A,top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.9999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9998, "answer": "us-001", "start": 1_6, "end": 1_6},
],)
_lowerCamelCase : Union[str, Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}],top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
[
{"score": 0.9999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9998, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2,)
_lowerCamelCase : List[str] = list(zip(*apply_tesseract(load_image(__A ),__A,"" ) ) )
# This model should also work if `image` is set to None
_lowerCamelCase : Union[str, Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question},top_k=2 )
self.assertEqual(
nested_simplify(__A,decimals=4 ),[
{"score": 0.9999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9998, "answer": "us-001", "start": 1_6, "end": 1_6},
],)
@slow
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : str = pipeline(
"document-question-answering",model="naver-clova-ix/donut-base-finetuned-docvqa",tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ),feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",)
_lowerCamelCase : Any = INVOICE_URL
_lowerCamelCase : Union[str, Any] = '''What is the invoice number?'''
_lowerCamelCase : int = dqa_pipeline(image=__A,question=__A,top_k=2 )
self.assertEqual(nested_simplify(__A,decimals=4 ),[{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def lowerCamelCase_ ( self : Any ):
pass | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
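            # the fused qkv tensor is stacked along dim 0, so slicing it into three hidden_size-sized
            # chunks below recovers the separate query, key and value projection weights/biases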
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
import os
import string
import sys
UpperCAmelCase_ : Optional[int] = 1 << 8
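# ARROW_KEY_FLAG is a high bit OR-ed onto the arrow key codes below so that arrow presses can be
# distinguished from ordinary printable characters when a keystroke is decoded.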
UpperCAmelCase_ : Any = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def A_ ( ):
"""simple docstring"""
if os.name == "nt":
import msvcrt
_lowerCamelCase : Optional[Any] = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
# Read the keystroke
_lowerCamelCase : int = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : Optional[int] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : List[str] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(__lowerCAmelCase )
if ord(__lowerCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : Optional[int] = chr(KEYMAP["esc"] )
except KeyError:
_lowerCamelCase : Union[str, Any] = cha[1]
else:
_lowerCamelCase : List[Any] = ch.decode(__lowerCAmelCase )
else:
_lowerCamelCase : List[Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
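        # switch the terminal to raw mode so a single keypress can be read without waiting for Enter;
        # the saved settings are restored in the `finally` block below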
_lowerCamelCase : Tuple = sys.stdin.fileno()
_lowerCamelCase : Any = termios.tcgetattr(__lowerCAmelCase )
try:
tty.setraw(__lowerCAmelCase )
_lowerCamelCase : Dict = sys.stdin.read(1 )
finally:
termios.tcsetattr(__lowerCAmelCase , termios.TCSADRAIN , __lowerCAmelCase )
return ch
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Dict = get_raw_chars()
if ord(__lowerCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__lowerCAmelCase ) == KEYMAP["esc"]:
_lowerCamelCase : int = get_raw_chars()
if ord(__lowerCAmelCase ) == KEYMAP["mod_int"]:
_lowerCamelCase : Tuple = get_raw_chars()
if ord(__lowerCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__lowerCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__lowerCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 709 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """simple docstring"""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2)
        # sort the first half ascending and the second half descending to form a bitonic sequence,
        # then merge the whole range in the requested direction
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
def _print_dist(dist, v):
    """simple docstring"""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """simple docstring"""
    # dist[i][j] holds the length of the currently known shortest path from i to j
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 710 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list[float]) -> float:
    """simple docstring"""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod() | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase__ ( _snake_case ):
@slow
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny","prajjwal1/bert-tiny" )
_lowerCamelCase : List[Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
_lowerCamelCase : Dict = bertabert.config.encoder.vocab_size
_lowerCamelCase : List[str] = tokenizer.sep_token_id
_lowerCamelCase : List[str] = tokenizer.cls_token_id
_lowerCamelCase : Optional[int] = 1_2_8
_lowerCamelCase : Optional[Any] = datasets.load_dataset("cnn_dailymail","3.0.0",split="train[:1%]" )
_lowerCamelCase : Dict = datasets.load_dataset("cnn_dailymail","3.0.0",split="validation[:1%]" )
_lowerCamelCase : Optional[Any] = train_dataset.select(range(3_2 ) )
_lowerCamelCase : str = val_dataset.select(range(1_6 ) )
_lowerCamelCase : List[Any] = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"],padding="max_length",truncation=True,max_length=5_1_2 )
            outputs = tokenizer(batch["highlights"],padding="max_length",truncation=True,max_length=1_2_8 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # mask padding positions in the labels with -100 so they are ignored by the cross-entropy loss
            batch["labels"] = [
                [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 5_1_2 for x in inputs.input_ids )
            assert all(len(x ) == 1_2_8 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids,skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids,skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
# map train dataset
_lowerCamelCase : List[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs,batched=__A,batch_size=__A,remove_columns=["article", "highlights"],)
train_dataset.set_format(
type="torch",columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],)
# same for validation dataset
_lowerCamelCase : Optional[int] = val_dataset.map(
_map_to_encoder_decoder_inputs,batched=__A,batch_size=__A,remove_columns=["article", "highlights"],)
val_dataset.set_format(
type="torch",columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],)
_lowerCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Optional[Any] = SeqaSeqTrainingArguments(
output_dir=__A,per_device_train_batch_size=__A,per_device_eval_batch_size=__A,predict_with_generate=__A,evaluation_strategy="steps",do_train=__A,do_eval=__A,warmup_steps=0,eval_steps=2,logging_steps=2,)
# instantiate trainer
_lowerCamelCase : Union[str, Any] = SeqaSeqTrainer(
model=__A,args=__A,compute_metrics=_compute_metrics,train_dataset=__A,eval_dataset=__A,tokenizer=__A,)
# start training
trainer.train() | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
UpperCAmelCase_ : List[Any] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
UpperCAmelCase_ : str = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any=False ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : str = create_model(
"HTSAT-tiny" , "roberta" , lowerCAmelCase__ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=lowerCAmelCase__ , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Union[str, Any] = r".*sequential.(\d+).*"
_lowerCamelCase : Optional[int] = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowerCamelCase : List[Any] = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
# replace sequential layers with list
_lowerCamelCase : List[str] = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 )
_lowerCamelCase : Tuple = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(lowerCAmelCase__ )//3}.linear.' )
elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
_lowerCamelCase : Optional[Any] = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_lowerCamelCase : Optional[Any] = 1 if projecton_layer == 0 else 2
_lowerCamelCase : Dict = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
_lowerCamelCase : Optional[Any] = value
_lowerCamelCase : Dict = mixed_qkv.size(0 ) // 3
_lowerCamelCase : List[str] = mixed_qkv[:qkv_dim]
_lowerCamelCase : List[Any] = mixed_qkv[qkv_dim : qkv_dim * 2]
_lowerCamelCase : Optional[int] = mixed_qkv[qkv_dim * 2 :]
_lowerCamelCase : int = query_layer
_lowerCamelCase : List[Any] = key_layer
_lowerCamelCase : Dict = value_layer
else:
_lowerCamelCase : Dict = value
return model_state_dict
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ )
clap_model.eval()
_lowerCamelCase : int = clap_model.state_dict()
_lowerCamelCase : Dict = rename_state_dict(lowerCAmelCase__ )
_lowerCamelCase : Tuple = ClapConfig()
_lowerCamelCase : List[str] = enable_fusion
_lowerCamelCase : Dict = ClapModel(lowerCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
transformers_config.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = [0, 2, 4, 6, 8]
UpperCAmelCase_ : Tuple = [1, 3, 5, 7, 9]
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
_lowerCamelCase : Union[str, Any] = 0
for digit in range(10 ):
_lowerCamelCase : Optional[int] = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , __A , __A )
return result
_lowerCamelCase : Optional[Any] = 0
for digita in range(10 ):
_lowerCamelCase : Tuple = digita
if (remainder + digita) % 2 == 0:
_lowerCamelCase : Tuple = ODD_DIGITS
else:
_lowerCamelCase : Optional[int] = EVEN_DIGITS
for digita in other_parity_digits:
_lowerCamelCase : Tuple = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , __A , __A , )
return result
def A_ ( _lowerCAmelCase : Tuple = 9 ):
"""simple docstring"""
_lowerCamelCase : List[str] = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(__A , 0 , [0] * length , __A )
return result
if __name__ == "__main__":
print(f'''{solution() = }''') | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
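    # the keyword-count threshold grows with file length, so longer files need proportionally more
    # "config"/"test" mentions before being flagged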
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( a__ ):
lowerCAmelCase_ = 'camembert'
def __init__( self : Optional[Any],__A : str=3_0_5_2_2,__A : Optional[int]=7_6_8,__A : Optional[int]=1_2,__A : Optional[int]=1_2,__A : Union[str, Any]=3_0_7_2,__A : List[str]="gelu",__A : Union[str, Any]=0.1,__A : Union[str, Any]=0.1,__A : List[str]=5_1_2,__A : int=2,__A : List[Any]=0.02,__A : Tuple=1e-12,__A : Union[str, Any]=1,__A : Tuple=0,__A : Union[str, Any]=2,__A : Dict="absolute",__A : Dict=True,__A : Any=None,**__A : Any,):
super().__init__(pad_token_id=_A,bos_token_id=_A,eos_token_id=_A,**_A )
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Dict = position_embedding_type
_lowerCamelCase : Dict = use_cache
_lowerCamelCase : int = classifier_dropout
class UpperCAmelCase__ ( a__ ):
@property
def lowerCamelCase_ ( self : str ):
if self.task == "multiple-choice":
_lowerCamelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCamelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
        _lowerCamelCase : Dict = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
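            # the features and a ready-to-use dataset are built in one pass and cached together with the
            # raw examples below, so subsequent runs can skip tokenization entirely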
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
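# ---------------------------------------------------------------------------
# Editor's sketch (not part of the dataset row above; names are invented): the
# obfuscated SQuAD dataset class above returns a dict of tensors from
# __getitem__, which batches directly with torch's default collate_fn.
import torch
from torch.utils.data import DataLoader, Dataset


class ToyQADataset(Dataset):
    """Minimal stand-in that mimics the dict-of-tensors __getitem__ pattern."""

    def __init__(self, num_examples: int = 8, seq_len: int = 16):
        self.num_examples = num_examples
        self.seq_len = seq_len

    def __len__(self):
        return self.num_examples

    def __getitem__(self, i):
        return {
            "input_ids": torch.randint(0, 100, (self.seq_len,), dtype=torch.long),
            "attention_mask": torch.ones(self.seq_len, dtype=torch.long),
            "token_type_ids": torch.zeros(self.seq_len, dtype=torch.long),
            "start_positions": torch.tensor(1, dtype=torch.long),
            "end_positions": torch.tensor(3, dtype=torch.long),
        }


if __name__ == "__main__":
    batch = next(iter(DataLoader(ToyQADataset(), batch_size=4)))
    print({k: tuple(v.shape) for k, v in batch.items()})
    # {'input_ids': (4, 16), ..., 'start_positions': (4,), 'end_positions': (4,)}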
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase__ :
def __init__( self : Tuple,__A : int,__A : Tuple=1_3,__A : List[str]=3_0,__A : Tuple=2,__A : Any=3,__A : Dict=True,__A : Optional[int]=True,__A : int=3_2,__A : List[str]=5,__A : Any=4,__A : Optional[int]=3_7,__A : List[Any]="gelu",__A : int=0.1,__A : Union[str, Any]=0.1,__A : Optional[Any]=1_0,__A : Dict=0.02,__A : Tuple=3,__A : Union[str, Any]=None,__A : List[Any]=2,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Any = type_sequence_label_size
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : List[Any] = scope
_lowerCamelCase : Optional[int] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase : List[str] = (image_size // patch_size) ** 2
_lowerCamelCase : List[str] = num_patches + 2
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : str ):
return DeiTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=__lowerCamelCase,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,)
def lowerCamelCase_ ( self : int,__A : Optional[Any],__A : int,__A : List[str] ):
_lowerCamelCase : Optional[int] = DeiTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_lowerCamelCase : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : List[Any],__A : Optional[Any],__A : Dict,__A : Any ):
_lowerCamelCase : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : Any,__A : str,__A : List[str],__A : int ):
_lowerCamelCase : str = self.type_sequence_label_size
_lowerCamelCase : List[str] = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCamelCase,labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : Any = 1
_lowerCamelCase : str = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_lowerCamelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Tuple = model(__lowerCamelCase,labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : Tuple = config_and_inputs
_lowerCamelCase : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( _A , _A , unittest.TestCase ):
lowerCAmelCase_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : str = DeiTModelTester(self )
_lowerCamelCase : Optional[int] = ConfigTester(self,config_class=__lowerCamelCase,has_text_modality=__lowerCamelCase,hidden_size=3_7 )
def lowerCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowerCamelCase_ ( self : Any ):
pass
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase,nn.Linear ) )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCamelCase )
_lowerCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Any = [*signature.parameters.keys()]
_lowerCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1],__lowerCamelCase )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Optional[Any],__A : List[Any]=False ):
_lowerCamelCase : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase,__lowerCamelCase,return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase_ ( self : Optional[int] ):
if not self.model_tester.is_training:
return
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
_lowerCamelCase : Union[str, Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
_lowerCamelCase : Union[str, Any] = self._prepare_for_class(__lowerCamelCase,__lowerCamelCase,return_labels=__lowerCamelCase )
_lowerCamelCase : Dict = model(**__lowerCamelCase ).loss
loss.backward()
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCamelCase : Tuple = False
_lowerCamelCase : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
_lowerCamelCase : List[str] = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
_lowerCamelCase : Dict = self._prepare_for_class(__lowerCamelCase,__lowerCamelCase,return_labels=__lowerCamelCase )
_lowerCamelCase : Tuple = model(**__lowerCamelCase ).loss
loss.backward()
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : int = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
_lowerCamelCase : Tuple = problem_type["title"]
_lowerCamelCase : Optional[Any] = problem_type["num_labels"]
_lowerCamelCase : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
_lowerCamelCase : List[Any] = self._prepare_for_class(__lowerCamelCase,__lowerCamelCase,return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
_lowerCamelCase : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1,problem_type["num_labels"] )
_lowerCamelCase : int = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
_lowerCamelCase : Optional[Any] = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def lowerCamelCase_ ( self : List[Any] ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Tuple ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__lowerCamelCase )
_lowerCamelCase : List[Any] = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(images=__lowerCamelCase,return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : List[str] = model(**__lowerCamelCase )
# verify the logits
_lowerCamelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__lowerCamelCase )
_lowerCamelCase : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__lowerCamelCase,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224",torch_dtype=torch.floataa,device_map="auto" )
_lowerCamelCase : Dict = self.default_image_processor
_lowerCamelCase : Optional[int] = prepare_img()
_lowerCamelCase : Union[str, Any] = image_processor(images=__lowerCamelCase,return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_lowerCamelCase : List[str] = model(__lowerCamelCase ) | 716 |
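# ---------------------------------------------------------------------------
# Editor's sketch for the problem_type test in the block above: transformers
# classification heads conventionally pick the loss from config.problem_type
# (regression / single_label_classification / multi_label_classification).
# This is a plain-torch reimplementation of that dispatch, not the DeiT code;
# shapes and values are invented.
import torch
from torch import nn


def classification_loss(problem_type: str, logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    if problem_type == "regression":
        return nn.MSELoss()(logits.squeeze(-1), labels.float())
    if problem_type == "single_label_classification":
        return nn.CrossEntropyLoss()(logits, labels.long())
    if problem_type == "multi_label_classification":
        return nn.BCEWithLogitsLoss()(logits, labels.float())
    raise ValueError(f"unknown problem_type: {problem_type}")


if __name__ == "__main__":
    logits = torch.randn(4, 3)
    print(classification_loss("single_label_classification", logits, torch.tensor([0, 1, 2, 1])))
    print(classification_loss("multi_label_classification", logits, torch.randint(0, 2, (4, 3))))
    print(classification_loss("regression", torch.randn(4, 1), torch.randn(4)))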
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
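# ---------------------------------------------------------------------------
# Editor's sketch of the special-token layout implemented by the two methods
# above (pure Python; toy ids 2/3 stand in for [CLS]/[SEP]):
#   single sequence -> [CLS] A [SEP]          token_type_ids all 0
#   sequence pair   -> [CLS] A [SEP] B [SEP]  second segment marked 1
CLS, SEP = 2, 3


def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]


def token_type_ids(ids_a, ids_b=None):
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)


if __name__ == "__main__":
    print(build_inputs([10, 11], [20]))    # [2, 10, 11, 3, 20, 3]
    print(token_type_ids([10, 11], [20]))  # [0, 0, 0, 0, 1, 1]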
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class UpperCAmelCase__ ( _UpperCAmelCase ):
def __init__( self : Optional[int],**__A : str ):
super().__init__(**lowerCamelCase_ )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self : str,__A : Union[np.ndarray, bytes, str],**__A : List[str] ):
return super().__call__(lowerCamelCase_,**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
_lowerCamelCase : List[str] = {}
if "candidate_labels" in kwargs:
_lowerCamelCase : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
_lowerCamelCase : Any = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCamelCase_ ( self : str,__A : Dict,__A : List[str]=None,__A : Any="This is a sound of {}." ):
if isinstance(lowerCamelCase_,lowerCamelCase_ ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_lowerCamelCase : Optional[Any] = requests.get(lowerCamelCase_ ).content
else:
with open(lowerCamelCase_,"rb" ) as f:
_lowerCamelCase : Any = f.read()
if isinstance(lowerCamelCase_,lowerCamelCase_ ):
_lowerCamelCase : str = ffmpeg_read(lowerCamelCase_,self.feature_extractor.sampling_rate )
if not isinstance(lowerCamelCase_,np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
_lowerCamelCase : Any = self.feature_extractor(
[audio],sampling_rate=self.feature_extractor.sampling_rate,return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = candidate_labels
_lowerCamelCase : Any = [hypothesis_template.format(lowerCamelCase_ ) for x in candidate_labels]
_lowerCamelCase : Union[str, Any] = self.tokenizer(lowerCamelCase_,return_tensors=self.framework,padding=lowerCamelCase_ )
_lowerCamelCase : Dict = [text_inputs]
return inputs
def lowerCamelCase_ ( self : List[str],__A : Dict ):
_lowerCamelCase : Union[str, Any] = model_inputs.pop("candidate_labels" )
_lowerCamelCase : Union[str, Any] = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0],lowerCamelCase_ ):
_lowerCamelCase : Union[str, Any] = text_inputs[0]
else:
# Batching case.
_lowerCamelCase : List[str] = text_inputs[0][0]
_lowerCamelCase : Union[str, Any] = self.model(**lowerCamelCase_,**lowerCamelCase_ )
_lowerCamelCase : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def lowerCamelCase_ ( self : str,__A : Optional[Any] ):
_lowerCamelCase : Dict = model_outputs.pop("candidate_labels" )
_lowerCamelCase : Optional[int] = model_outputs['''logits'''][0]
if self.framework == "pt":
_lowerCamelCase : Dict = logits.softmax(dim=0 )
_lowerCamelCase : Dict = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
_lowerCamelCase : Dict = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_,lowerCamelCase_ ),key=lambda __A : -x[0] )
]
return result | 717 |
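# ---------------------------------------------------------------------------
# Editor's sketch of the postprocessing step above: one logit per candidate
# label is converted to a probability with a softmax and the labels are
# returned sorted by score.  Plain torch, numbers invented.
import torch


def rank_labels(logits, candidate_labels):
    probs = torch.tensor(logits, dtype=torch.float).softmax(dim=0).tolist()
    return sorted(
        ({"score": score, "label": label} for score, label in zip(probs, candidate_labels)),
        key=lambda d: -d["score"],
    )


if __name__ == "__main__":
    print(rank_labels([2.0, 0.5, -1.0], ["dog barking", "vacuum cleaner", "rain"]))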
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt: off
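# NOTE (editor): the entries of the set below are meant to be distinct Unicode
# space / invisible characters (non-breaking space, thin space, zero-width
# space, ...); several of them render as an ordinary space in this dump, which
# is why they look identical here.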
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
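# ---------------------------------------------------------------------------
# Editor's sketch of the preprocessing implemented above: strip non-printing
# control characters, collapse unusual whitespace code points to a plain
# space, then apply NFC normalization.  The whitespace set below is a reduced
# example; the real tokenizer uses a larger one.
import re
import unicodedata

NON_PRINTING = re.compile(
    "[" + "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203])) + "]"
)
WHITESPACES = {"\u2009", "\u202f", "\u3000"}  # thin space, narrow no-break space, ideographic space


def preprocess(text: str) -> str:
    text = NON_PRINTING.sub("", text)
    text = "".join(" " if ch in WHITESPACES else ch for ch in text)
    return unicodedata.normalize("NFC", text)


if __name__ == "__main__":
    print(repr(preprocess("hej\u2009v\u0065\u0301rlden\u200b!")))  # 'hej vérlden!'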
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = '''▁'''
UpperCAmelCase_ : int = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCAmelCase_ : Optional[int] = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
UpperCAmelCase_ : List[str] = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
UpperCAmelCase_ : List[Any] = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class UpperCAmelCase__ ( _UpperCAmelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self : Tuple,__A : Any,__A : Any="<s>",__A : Any="</s>",__A : Dict="</s>",__A : Optional[Any]="<s>",__A : str="<unk>",__A : Optional[Any]="<pad>",__A : List[Any]="<mask>",__A : Union[str, Any]=None,__A : Optional[int]=None,__A : Union[str, Any]=None,__A : Optional[Dict[str, Any]] = None,__A : Any=None,**__A : Tuple,):
_lowerCamelCase : Any = AddedToken(__UpperCamelCase,lstrip=__UpperCamelCase,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase,__UpperCamelCase ) else mask_token
_lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase,eos_token=__UpperCamelCase,unk_token=__UpperCamelCase,sep_token=__UpperCamelCase,cls_token=__UpperCamelCase,pad_token=__UpperCamelCase,mask_token=__UpperCamelCase,tokenizer_file=__UpperCamelCase,src_lang=__UpperCamelCase,tgt_lang=__UpperCamelCase,additional_special_tokens=__UpperCamelCase,sp_model_kwargs=self.sp_model_kwargs,**__UpperCamelCase,)
_lowerCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
_lowerCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_lowerCamelCase : int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : Optional[int] = len(self.sp_model )
_lowerCamelCase : Dict = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCamelCase )
}
_lowerCamelCase : Dict = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCamelCase : Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCamelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCamelCase : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowerCamelCase : Optional[int] = src_lang if src_lang is not None else "en_XX"
_lowerCamelCase : Optional[Any] = self.lang_code_to_id[self._src_lang]
_lowerCamelCase : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ):
_lowerCamelCase : Optional[int] = self.__dict__.copy()
_lowerCamelCase : str = None
_lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any],__A : Dict ):
_lowerCamelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCamelCase_ ( self : Any ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase_ ( self : Optional[Any] ):
return self._src_lang
@src_lang.setter
def lowerCamelCase_ ( self : int,__A : str ):
_lowerCamelCase : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None,__A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase,token_ids_a=__UpperCamelCase,already_has_special_tokens=__UpperCamelCase )
_lowerCamelCase : int = [1] * len(self.prefix_tokens )
_lowerCamelCase : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones
def lowerCamelCase_ ( self : str,__A : List[int],__A : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Optional[Any],__A : Union[str, Any],__A : str,__A : Optional[str],__A : Optional[str],**__A : Tuple ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_lowerCamelCase : int = src_lang
_lowerCamelCase : List[str] = self(__UpperCamelCase,add_special_tokens=__UpperCamelCase,return_tensors=__UpperCamelCase,**__UpperCamelCase )
_lowerCamelCase : Union[str, Any] = self.convert_tokens_to_ids(__UpperCamelCase )
_lowerCamelCase : Tuple = tgt_lang_id
return inputs
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple,__A : str ):
return self.sp_model.encode(__UpperCamelCase,out_type=__UpperCamelCase )
def lowerCamelCase_ ( self : Dict,__A : Any ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase : Tuple = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : int,__A : List[Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : Dict,__A : Dict ):
_lowerCamelCase : str = "".join(__UpperCamelCase ).replace(__UpperCamelCase," " ).strip()
return out_string
def lowerCamelCase_ ( self : Tuple,__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Optional[int] = os.path.join(
__UpperCamelCase,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase,"wb" ) as fi:
_lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def lowerCamelCase_ ( self : List[Any],__A : List[str],__A : str = "en_XX",__A : Optional[List[str]] = None,__A : str = "ro_RO",**__A : Tuple,):
_lowerCamelCase : List[Any] = src_lang
_lowerCamelCase : str = tgt_lang
return super().prepare_seqaseq_batch(__UpperCamelCase,__UpperCamelCase,**__UpperCamelCase )
def lowerCamelCase_ ( self : Tuple ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase_ ( self : Optional[int] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase_ ( self : List[str],__A : Tuple ):
_lowerCamelCase : Union[str, Any] = self.lang_code_to_id[src_lang]
_lowerCamelCase : List[str] = []
_lowerCamelCase : str = [self.eos_token_id, self.cur_lang_code]
def lowerCamelCase_ ( self : Optional[int],__A : str ):
_lowerCamelCase : Union[str, Any] = self.lang_code_to_id[lang]
_lowerCamelCase : List[Any] = []
_lowerCamelCase : int = [self.eos_token_id, self.cur_lang_code]
| 718 |
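# ---------------------------------------------------------------------------
# Editor's sketch of the fairseq/SentencePiece id alignment handled above: the
# first four ids are reserved for <s>/<pad>/</s>/<unk>, SentencePiece ids are
# shifted by a fixed offset, and language codes are appended after the shifted
# vocabulary.  Vocab size and lookup table below are invented.
FAIRSEQ_SPECIAL = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1
SPM_SIZE = 100
LANG_CODES = ["en_XX", "ro_RO"]
LANG_CODE_TO_ID = {code: SPM_SIZE + i + FAIRSEQ_OFFSET for i, code in enumerate(LANG_CODES)}


def token_to_id(token, spm_lookup):
    if token in FAIRSEQ_SPECIAL:
        return FAIRSEQ_SPECIAL[token]
    if token in LANG_CODE_TO_ID:
        return LANG_CODE_TO_ID[token]
    spm_id = spm_lookup(token)
    # SentencePiece returns 0 for unknown pieces -> map to the fairseq <unk>
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_SPECIAL["<unk>"]


if __name__ == "__main__":
    spm_lookup = lambda tok: {"▁hello": 10, "▁world": 11}.get(tok, 0)
    print(token_to_id("<pad>", spm_lookup))    # 1
    print(token_to_id("▁hello", spm_lookup))   # 11
    print(token_to_id("en_XX", spm_lookup))    # 101
    print(token_to_id("▁unseen", spm_lookup))  # 3 (falls back to <unk>)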
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
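# ---------------------------------------------------------------------------
# Editor's note on the merge above: merge_lists concatenates both inputs and
# rebuilds a sorted list, which costs O((n+m) log(n+m)).  Since both inputs
# are already sorted, a classic two-pointer merge does it in O(n+m); sketch on
# plain Python lists:
def merge_sorted(a, b):
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    out.extend(a[i:])
    out.extend(b[j:])
    return out


if __name__ == "__main__":
    print(merge_sorted([-11, 0, 1, 3], [-2, 0, 2, 4]))  # [-11, -2, 0, 0, 1, 2, 3, 4]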
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return EnvironmentCommand()
class UpperCAmelCase__ ( _a ):
@staticmethod
def lowerCamelCase_ ( __A : List[str] ):
_lowerCamelCase : Any = parser.add_parser("env" )
download_parser.set_defaults(func=snake_case_ )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Union[str, Any] = huggingface_hub.__version__
_lowerCamelCase : int = "not installed"
_lowerCamelCase : int = "NA"
if is_torch_available():
import torch
_lowerCamelCase : List[Any] = torch.__version__
_lowerCamelCase : Dict = torch.cuda.is_available()
_lowerCamelCase : Optional[Any] = "not installed"
if is_transformers_available():
import transformers
_lowerCamelCase : List[Any] = transformers.__version__
_lowerCamelCase : Optional[Any] = "not installed"
if is_accelerate_available():
import accelerate
_lowerCamelCase : str = accelerate.__version__
_lowerCamelCase : List[Any] = "not installed"
if is_xformers_available():
import xformers
_lowerCamelCase : int = xformers.__version__
_lowerCamelCase : str = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(snake_case_ ) )
return info
@staticmethod
def lowerCamelCase_ ( __A : List[str] ):
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n" | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
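# ---------------------------------------------------------------------------
# Editor's sketch of the idea behind the lazy-import structure above: heavy
# submodules are only imported when one of their names is first accessed.
# This uses the module-level __getattr__ hook (PEP 562) instead of
# transformers' _LazyModule, purely to illustrate the mechanism; module and
# attribute names below are invented.  Save as lazy_mod.py and run
# `import lazy_mod; lazy_mod.sqrt(4.0)` - math is only imported on that access.
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module


def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")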
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[int],__A : Optional[Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"],model_result["ss"] ):
_lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_lowercase )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Dict = """sshleifer/tiny-gpt2"""
_lowerCamelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],eager_mode=_lowercase,multi_process=_lowercase,)
_lowerCamelCase : Dict = TensorFlowBenchmark(_lowercase )
_lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = """sgugger/tiny-distilbert-classification"""
_lowerCamelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],multi_process=_lowercase,only_pretrain_model=_lowercase,)
_lowerCamelCase : Optional[Any] = TensorFlowBenchmark(_lowercase )
_lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
_lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],multi_process=_lowercase,)
_lowerCamelCase : int = TensorFlowBenchmark(_lowercase )
_lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Union[str, Any] = """sshleifer/tiny-gpt2"""
_lowerCamelCase : str = AutoConfig.from_pretrained(_lowercase )
_lowerCamelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],eager_mode=_lowercase,multi_process=_lowercase,)
_lowerCamelCase : List[Any] = TensorFlowBenchmark(_lowercase,[config] )
_lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_lowercase )
_lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],multi_process=_lowercase,)
_lowerCamelCase : List[str] = TensorFlowBenchmark(_lowercase,[config] )
_lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Any = """sshleifer/tiny-gpt2"""
_lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],multi_process=_lowercase,)
_lowerCamelCase : List[Any] = TensorFlowBenchmark(_lowercase )
_lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(_lowercase )
_lowerCamelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],multi_process=_lowercase,)
_lowerCamelCase : Optional[Any] = TensorFlowBenchmark(_lowercase,[config] )
_lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Dict = """patrickvonplaten/t5-tiny-random"""
_lowerCamelCase : int = AutoConfig.from_pretrained(_lowercase )
_lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],multi_process=_lowercase,)
_lowerCamelCase : Tuple = TensorFlowBenchmark(_lowercase,configs=[config] )
_lowerCamelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0,"Cannot do xla on CPU." )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = """sshleifer/tiny-gpt2"""
_lowerCamelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_lowercase,inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],use_xla=_lowercase,multi_process=_lowercase,)
_lowerCamelCase : int = TensorFlowBenchmark(_lowercase )
_lowerCamelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID],inference=_lowercase,save_to_csv=_lowercase,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(_lowercase,"inf_time.csv" ),inference_memory_csv_file=os.path.join(_lowercase,"inf_mem.csv" ),env_info_csv_file=os.path.join(_lowercase,"env.csv" ),multi_process=_lowercase,)
_lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(_lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowercase,"inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase,"inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase,"env.csv" ) ).exists() )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Dict = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__A : List[str] ):
self.assertTrue(hasattr(_lowercase,"sequential" ) )
self.assertTrue(hasattr(_lowercase,"cumulative" ) )
self.assertTrue(hasattr(_lowercase,"current" ) )
self.assertTrue(hasattr(_lowercase,"total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID],inference=_lowercase,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(_lowercase,"log.txt" ),log_print=_lowercase,trace_memory_line_by_line=_lowercase,eager_mode=_lowercase,multi_process=_lowercase,)
_lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(_lowercase )
_lowerCamelCase : List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowercase,"log.txt" ) ).exists() ) | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : int = inspect.getfile(accelerate.test_utils )
_lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
_lowerCamelCase : Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
_lowerCamelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def lowerCamelCase_ ( self : Optional[int] ):
print(f'Found {torch.cuda.device_count()} devices.' )
_lowerCamelCase : Optional[Any] = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_,env=os.environ.copy() )
@require_multi_gpu
def lowerCamelCase_ ( self : Optional[int] ):
print(f'Found {torch.cuda.device_count()} devices.' )
_lowerCamelCase : Tuple = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(f'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_,env=os.environ.copy() )
@require_multi_gpu
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[Any] = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_,env=os.environ.copy() )
@require_multi_gpu
def lowerCamelCase_ ( self : str ):
print(f'Found {torch.cuda.device_count()} devices, using 2 devices only' )
_lowerCamelCase : List[Any] = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1,cuda_visible_devices="0,1" ):
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase_ : str = Accelerator()
UpperCAmelCase_ : Dict = (accelerator.state.process_index + 2, 10)
UpperCAmelCase_ : str = torch.randint(0, 10, shape).to(accelerator.device)
UpperCAmelCase_ : Optional[int] = ''
UpperCAmelCase_ : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCAmelCase_ : Optional[int] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCAmelCase_ : Dict = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple : tuple , flax_tensor ):
    """Rename a single Flax parameter key/tensor pair to the PyTorch naming scheme."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
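# Editorial sketch of what the renaming above does (the key below is illustrative only, not taken
# from a real checkpoint): a 3-D expert kernel keeps its layout but is permuted, while a plain 2-D
# kernel is transposed and renamed to "weight".
#
#     key, tensor = rename_base_flax_keys(("encoder", "mlp", "wi", "kernel"), torch.ones(4, 8))
#     # key == ("encoder", "mlp", "wi", "weight") and tensor.shape == (8, 4)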
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    """Split a flattened checkpoint key into its parent layer name, leaf key and resolved content."""
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
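# Editorial note (sketch): the helper above handles three kinds of flattened keys. "metadata" and
# "kvstore" entries are split so they can be regrouped under their parent layer, "kvstore/path"
# values are rewritten to point inside `switch_checkpoint_path`, "kvstore/driver" is forced to the
# local "file" driver, and anything else is taken verbatim from `checkpoint_info`.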
def rename_and_save_block( current_block , save_path ):
    """Rename the keys of one shard (the "/" separators become ".") and save it to `save_path`."""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/" , "." )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name : str = WEIGHTS_NAME ):
    """Shard a T5X Switch Transformers checkpoint into PyTorch weight files of at most `max_shard_size` each."""
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info , sep="/" )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ) , raw_weights )
        key = "/".join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , F'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , F'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            ".bin" , F'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
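# Editorial note (sketch): on success the function above writes the renamed shard files next to a
# WEIGHTS_INDEX_NAME JSON file of the form
#
#     {"metadata": {"total_size": <bytes>}, "weight_map": {"<param name>": "<shard file>", ...}}
#
# and returns the (metadata, index) pair; with a single shard it returns
# ({weights_name: <keys of the only block>}, None) and no index file is written.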
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Quick end-to-end check that a converted checkpoint can be loaded and used for generation."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    tokenizer = T5Tokenizer.from_pretrained("t5-small" )
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text , return_tensors="pt" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
lowerCAmelCase_ = 'vision-encoder-decoder'
lowerCAmelCase_ = True
def __init__( self : int,**__A : List[Any] ):
super().__init__(**__A )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'A configuraton of type {self.model_type} cannot be instantiated because '
f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
_lowerCamelCase : Optional[Any] = kwargs.pop("encoder" )
_lowerCamelCase : Dict = encoder_config.pop("model_type" )
_lowerCamelCase : Any = kwargs.pop("decoder" )
_lowerCamelCase : Optional[int] = decoder_config.pop("model_type" )
_lowerCamelCase : Optional[int] = AutoConfig.for_model(__A,**__A )
_lowerCamelCase : Dict = AutoConfig.for_model(__A,**__A )
_lowerCamelCase : str = True
@classmethod
def lowerCamelCase_ ( cls : Any,__A : int,__A : Dict,**__A : int ):
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
_lowerCamelCase : List[str] = True
_lowerCamelCase : int = True
return cls(encoder=encoder_config.to_dict(),decoder=decoder_config.to_dict(),**__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
_lowerCamelCase : List[Any] = self.encoder.to_dict()
_lowerCamelCase : str = self.decoder.to_dict()
_lowerCamelCase : List[str] = self.__class__.model_type
return output
class UpperCAmelCase__ ( lowercase__ ):
lowerCAmelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : Tuple ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCamelCase_ ( self : int ):
return 1e-4
@property
def lowerCamelCase_ ( self : Optional[Any] ):
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class UpperCAmelCase__ ( lowercase__ ):
@property
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[int] = OrderedDict()
_lowerCamelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
_lowerCamelCase : str = {0: "batch", 1: "past_decoder_sequence + sequence"}
_lowerCamelCase : List[Any] = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : int = -1,__A : Dict = -1,__A : str = False,__A : Optional[Any] = None,):
import torch
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : List[Any] = super().generate_dummy_inputs(
__A,batch_size=__A,seq_length=__A,is_pair=__A,framework=__A )
_lowerCamelCase , _lowerCamelCase : Optional[int] = dummy_input["input_ids"].shape
_lowerCamelCase : Dict = (batch, encoder_sequence, self._config.encoder_hidden_size)
_lowerCamelCase : List[Any] = dummy_input.pop("input_ids" )
_lowerCamelCase : int = dummy_input.pop("attention_mask" )
_lowerCamelCase : int = torch.zeros(__A )
return common_inputs
class UpperCAmelCase__ ( lowercase__ ):
@property
def lowerCamelCase_ ( self : str ):
pass
def lowerCamelCase_ ( self : Dict,__A : Dict ):
return VisionEncoderDecoderEncoderOnnxConfig(__A )
def lowerCamelCase_ ( self : List[str],__A : Union[str, Any],__A : Optional[int],__A : Optional[Any] = "default" ):
_lowerCamelCase : Optional[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__A,__A ) | 700 |
'''simple docstring'''
from math import sqrt
def solution( limit : int = 1000000 ):
    """
    Return the smallest cuboid size M for which more than `limit` cuboids with integer
    sides up to M have an integer-length shortest surface path between opposite corners.
    """
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
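# Quick sanity-check sketch (editorial, not part of the original script): running the solver with a
# much smaller limit, e.g.
#
#     print(solution(100))
#
# finishes quickly and is a convenient way to eyeball the cuboid-counting loop before the full run.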
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'gpt_neo'
lowerCAmelCase_ = ['past_key_values']
lowerCAmelCase_ = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : List[str],__A : Optional[Any]=5_0_2_5_7,__A : List[Any]=2_0_4_8,__A : List[str]=2_0_4_8,__A : List[str]=2_4,__A : Dict=[[["global", "local"], 1_2]],__A : Any=1_6,__A : List[Any]=None,__A : str=2_5_6,__A : Union[str, Any]="gelu_new",__A : Union[str, Any]=0.0,__A : str=0.0,__A : Any=0.0,__A : List[str]=0.1,__A : Union[str, Any]=1e-5,__A : Any=0.02,__A : str=True,__A : Tuple=5_0_2_5_6,__A : Dict=5_0_2_5_6,**__A : List[Any],):
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Optional[Any] = num_layers
_lowerCamelCase : str = num_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : int = window_size
_lowerCamelCase : Any = activation_function
_lowerCamelCase : Optional[Any] = resid_dropout
_lowerCamelCase : Any = embed_dropout
_lowerCamelCase : str = attention_dropout
_lowerCamelCase : Dict = classifier_dropout
_lowerCamelCase : Tuple = layer_norm_epsilon
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Dict = use_cache
_lowerCamelCase : List[str] = bos_token_id
_lowerCamelCase : Any = eos_token_id
_lowerCamelCase : Tuple = attention_types
_lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__A )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
f'`config.num_layers = {self.num_layers}`. '
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__A,eos_token_id=__A,**__A )
@staticmethod
def lowerCamelCase_ ( __A : List[str] ):
_lowerCamelCase : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
import torch
_lowerCamelCase : Tuple = input.size()
_lowerCamelCase : Any = len(_lowerCAmelCase )
_lowerCamelCase : str = shape[dimension]
_lowerCamelCase : Union[str, Any] = torch.arange(0 , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Tuple = torch.div(sizedim - size , _lowerCAmelCase , rounding_mode="floor" ) + 1
_lowerCamelCase : List[Any] = torch.arange(_lowerCAmelCase ) + low_indices[:min_length][:, None]
_lowerCamelCase : Tuple = [slice(_lowerCAmelCase )] * rank
_lowerCamelCase : int = indices
_lowerCamelCase : Dict = input[s]
_lowerCamelCase : str = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
"""simple docstring"""
import torch
_lowerCamelCase : Any = torch.arange(1 , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = torch.remainder(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : List[Any] = remainders == 0
_lowerCamelCase : Any = candidates[divisor_indices]
_lowerCamelCase : Union[str, Any] = torch.max(_lowerCAmelCase )
return largest_divisor, torch.div(_lowerCAmelCase , _lowerCAmelCase , rounding_mode="floor" )
class UpperCAmelCase__ ( A ):
@property
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__A,direction="inputs" )
_lowerCamelCase : List[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
_lowerCamelCase : Union[str, Any] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCamelCase_ ( self : Dict ):
return self._config.num_heads
def lowerCamelCase_ ( self : Dict,__A : Tuple,__A : int = -1,__A : List[str] = -1,__A : Optional[int] = False,__A : Optional[int] = None,):
_lowerCamelCase : Any = super(__A,self ).generate_dummy_inputs(
__A,batch_size=__A,seq_length=__A,is_pair=__A,framework=__A )
# We need to order the input in the way they appears in the forward()
_lowerCamelCase : List[str] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCamelCase : Tuple = seqlen + 2
_lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCamelCase : List[Any] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
_lowerCamelCase : str = common_inputs["attention_mask"]
if self.use_past:
_lowerCamelCase : Optional[Any] = ordered_inputs["attention_mask"].dtype
_lowerCamelCase : Any = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__A,__A,dtype=__A )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase_ ( self : int ):
return 1_3 | 701 |
'''simple docstring'''
def A_ ( num : int ):
    """Convert a decimal integer to its binary representation as a string (e.g. 5 -> "0b101")."""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary : list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : list[list[str]] = [[] for _ in range(snake_case_ )]
_lowerCamelCase : Optional[int] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(snake_case_ ) <= key:
return input_string
for position, character in enumerate(snake_case_ ):
_lowerCamelCase : Union[str, Any] = position % (lowest * 2) # puts it in bounds
_lowerCamelCase : Dict = min(snake_case_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(snake_case_ )
_lowerCamelCase : Tuple = ["".join(snake_case_ ) for row in temp_grid]
_lowerCamelCase : List[str] = "".join(snake_case_ )
return output_string
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
_lowerCamelCase : list[list[str]] = [[] for _ in range(snake_case_ )] # generates template
for position in range(len(snake_case_ ) ):
_lowerCamelCase : Optional[Any] = position % (lowest * 2) # puts it in bounds
_lowerCamelCase : str = min(snake_case_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
_lowerCamelCase : int = 0
for row in temp_grid: # fills in the characters
_lowerCamelCase : Dict = input_string[counter : counter + len(snake_case_ )]
grid.append(list(snake_case_ ) )
counter += len(snake_case_ )
_lowerCamelCase : Dict = "" # reads as zigzag
for position in range(len(snake_case_ ) ):
_lowerCamelCase : List[Any] = position % (lowest * 2) # puts it in bounds
_lowerCamelCase : Tuple = min(snake_case_ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = {}
for key_guess in range(1 , len(snake_case_ ) ): # tries every key
_lowerCamelCase : Tuple = decrypt(snake_case_ , snake_case_ )
return results
if __name__ == "__main__":
import doctest
doctest.testmod() | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase_ : Tuple = 100
UpperCAmelCase_ : Optional[int] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase_ : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_lowerCamelCase : set[int] = set()
_lowerCamelCase : int
_lowerCamelCase : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def A_ ( _lowerCAmelCase : int = 5000 ):
"""simple docstring"""
for number_to_partition in range(1 , _lowerCamelCase ):
if len(partition(_lowerCamelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 703 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle( data : list ):
    """Shuffle `data` in place with random pairwise swaps and return the same list."""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
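# Editorial note (sketch): the shuffle above mutates and returns the same list object; seed the
# `random` module first if reproducible shuffles are needed, e.g.
#
#     random.seed(0)
#     print(fisher_yates_shuffle(list(range(8))))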
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase_ : int = logging.get_logger(__name__)
class UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Tuple,*__A : List[str],**__A : str ):
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead.",_a,)
super().__init__(*_a,**_a ) | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
        self.model_tester = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base",from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( __a ):
def __init__( self : int,*__A : Dict,**__A : Optional[int] ):
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead.",a_,)
super().__init__(*a_,**a_ ) | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = StableDiffusionDiffEditPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowerCAmelCase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase_ = frozenset([] )
def lowerCamelCase_ ( self : List[str] ):
torch.manual_seed(0 )
_lowerCamelCase : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4),layers_per_block=2,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),cross_attention_dim=3_2,attention_head_dim=(2, 4),use_linear_projection=__lowerCAmelCase,)
_lowerCamelCase : Tuple = DDIMScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule="scaled_linear",clip_sample=__lowerCAmelCase,set_alpha_to_one=__lowerCAmelCase,)
_lowerCamelCase : Any = DDIMInverseScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule="scaled_linear",clip_sample=__lowerCAmelCase,set_alpha_to_zero=__lowerCAmelCase,)
torch.manual_seed(0 )
_lowerCamelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,sample_size=1_2_8,)
torch.manual_seed(0 )
_lowerCamelCase : Dict = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act="gelu",projection_dim=5_1_2,)
_lowerCamelCase : Any = CLIPTextModel(__lowerCAmelCase )
_lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : Tuple = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase_ ( self : str,__A : Tuple,__A : List[str]=0 ):
_lowerCamelCase : Optional[int] = floats_tensor((1, 1_6, 1_6),rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : str = floats_tensor((1, 2, 4, 1_6, 1_6),rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith("mps" ):
_lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : int = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : List[str] = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCamelCase_ ( self : str,__A : Optional[int],__A : Optional[int]=0 ):
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Dict = image.cpu().permute(0,2,3,1 )[0]
_lowerCamelCase : str = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" )
if str(__lowerCAmelCase ).startswith("mps" ):
_lowerCamelCase : Tuple = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : Tuple = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Tuple = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : Dict=0 ):
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = image.cpu().permute(0,2,3,1 )[0]
_lowerCamelCase : List[str] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" )
if str(__lowerCAmelCase ).startswith("mps" ):
_lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : Tuple = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : List[Any] = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def lowerCamelCase_ ( self : List[str] ):
if not hasattr(self.pipeline_class,"_optional_components" ):
return
_lowerCamelCase : List[str] = self.get_dummy_components()
_lowerCamelCase : str = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_lowerCamelCase : Any = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCAmelCase,__lowerCAmelCase ) is None,f'`{optional_component}` did not stay set to None after loading.',)
_lowerCamelCase : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : List[Any] = pipe_loaded(**__lowerCAmelCase )[0]
_lowerCamelCase : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCAmelCase,1e-4 )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Dict = "cpu"
_lowerCamelCase : List[Any] = self.get_dummy_components()
_lowerCamelCase : Dict = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.get_dummy_mask_inputs(__lowerCAmelCase )
_lowerCamelCase : Tuple = pipe.generate_mask(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape,(1, 1_6, 1_6) )
_lowerCamelCase : Optional[Any] = np.array([0] * 9 )
_lowerCamelCase : List[Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase,1e-3 )
self.assertEqual(mask[0, -3, -4],0 )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[Any] = "cpu"
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : Union[str, Any] = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = self.get_dummy_inversion_inputs(__lowerCAmelCase )
_lowerCamelCase : Tuple = pipe.invert(**__lowerCAmelCase ).images
_lowerCamelCase : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape,(2, 3_2, 3_2, 3) )
_lowerCamelCase : Optional[Any] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],)
_lowerCamelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase,1e-3 )
def lowerCamelCase_ ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = "cpu"
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : str = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
_lowerCamelCase : List[str] = DPMSolverMultistepScheduler(**__lowerCAmelCase )
_lowerCamelCase : str = DPMSolverMultistepInverseScheduler(**__lowerCAmelCase )
_lowerCamelCase : List[str] = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : str = self.get_dummy_inversion_inputs(__lowerCAmelCase )
_lowerCamelCase : List[Any] = pipe.invert(**__lowerCAmelCase ).images
_lowerCamelCase : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape,(2, 3_2, 3_2, 3) )
_lowerCamelCase : Dict = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],)
_lowerCamelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase,1e-3 )
@require_torch_gpu
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
_lowerCamelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
_lowerCamelCase : Dict = raw_image.convert("RGB" ).resize((7_6_8, 7_6_8) )
_lowerCamelCase : Optional[Any] = raw_image
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = torch.manual_seed(0 )
_lowerCamelCase : Any = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1",safety_checker=__lowerCAmelCase,torch_dtype=torch.floataa )
_lowerCamelCase : Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config )
_lowerCamelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : List[str] = "a bowl of fruit"
_lowerCamelCase : Optional[Any] = "a bowl of pears"
_lowerCamelCase : List[str] = pipe.generate_mask(
image=self.raw_image,source_prompt=__lowerCAmelCase,target_prompt=__lowerCAmelCase,generator=__lowerCAmelCase,)
_lowerCamelCase : List[str] = pipe.invert(
prompt=__lowerCAmelCase,image=self.raw_image,inpaint_strength=0.7,generator=__lowerCAmelCase ).latents
_lowerCamelCase : Optional[Any] = pipe(
prompt=__lowerCAmelCase,mask_image=__lowerCAmelCase,image_latents=__lowerCAmelCase,generator=__lowerCAmelCase,negative_prompt=__lowerCAmelCase,inpaint_strength=0.7,output_type="numpy",).images[0]
_lowerCamelCase : Optional[Any] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5e-1
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
_lowerCamelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1",safety_checker=__lowerCAmelCase,torch_dtype=torch.floataa )
_lowerCamelCase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowerCamelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : List[str] = "a bowl of fruit"
_lowerCamelCase : str = "a bowl of pears"
_lowerCamelCase : Tuple = pipe.generate_mask(
image=self.raw_image,source_prompt=__lowerCAmelCase,target_prompt=__lowerCAmelCase,generator=__lowerCAmelCase,)
_lowerCamelCase : List[Any] = pipe.invert(
prompt=__lowerCAmelCase,image=self.raw_image,inpaint_strength=0.7,generator=__lowerCAmelCase,num_inference_steps=2_5,).latents
_lowerCamelCase : Optional[int] = pipe(
prompt=__lowerCAmelCase,mask_image=__lowerCAmelCase,image_latents=__lowerCAmelCase,generator=__lowerCAmelCase,negative_prompt=__lowerCAmelCase,inpaint_strength=0.7,num_inference_steps=2_5,output_type="numpy",).images[0]
_lowerCamelCase : Optional[int] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5e-1 | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
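# Usage sketch (editorial): calling the seeding helper above (named `A_` here) once at the start of
# a training script, e.g. `A_(42)`, seeds Python's `random`, NumPy and torch (CPU and all CUDA
# devices) so repeated runs produce the same random streams.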
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
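Below is a minimal, standalone sketch of the decay schedule computed by the EMA class above; the defaults (decay 0.9999, min_decay 0.0, inv_gamma 1.0, power 2/3) mirror the constructor signature, and the sample step values are arbitrary.
def ema_decay_sketch(step: int, decay: float = 0.9999, min_decay: float = 0.0,
                     use_ema_warmup: bool = False, inv_gamma: float = 1.0,
                     power: float = 2 / 3) -> float:
    # Hedged sketch, not part of the file above: with warmup the schedule is
    # 1 - (1 + step / inv_gamma) ** -power, otherwise (1 + step) / (10 + step),
    # and the result is clamped into [min_decay, decay].
    if step <= 0:
        return 0.0
    cur = 1 - (1 + step / inv_gamma) ** -power if use_ema_warmup else (1 + step) / (10 + step)
    return max(min(cur, decay), min_decay)

print(ema_decay_sketch(1))        # 2/11, i.e. ~0.18 early in training
print(ema_decay_sketch(100_000))  # clamped to 0.9999 late in training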
'''simple docstring'''
def A_ ( _lowerCAmelCase : int = 10 , _lowerCAmelCase : int = 22 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = range(1 , UpperCAmelCase__ )
_lowerCamelCase : Tuple = range(1 , UpperCAmelCase__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''') | 707 |
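A quick, hedged check of the predicate used above, assuming the intent is to count n-digit integers that are also n-th powers:
# 7**5 = 16807 has exactly 5 digits, so it satisfies len(str(base**power)) == power,
# while 10**2 = 100 has 3 digits and does not.
assert len(str(7**5)) == 5
assert len(str(10**2)) != 2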
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
            # We split QKV into separate Q, K, V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
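A self-contained sketch of the fused-QKV split performed by the conversion above; the hidden size of 8 and the random tensor are illustrative assumptions, not the real OPT dimensions.
import torch

hidden = 8  # toy size, assumed for illustration only
fused_qkv = torch.randn(3 * hidden, hidden)  # qkv_proj weight layout: (3 * hidden, hidden)
q, k, v = torch.split(fused_qkv, fused_qkv.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (hidden, hidden)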
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 1.5
_lowerCamelCase : List[str] = int(factor * num_class_images )
_lowerCamelCase : Optional[Any] = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 )
os.makedirs(F'{class_data_dir}/images' , exist_ok=_lowercase )
if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_lowerCamelCase : List[Any] = client.query(text=_lowercase )
if len(_lowercase ) >= factor * num_class_images or num_images > 1E4:
break
else:
_lowerCamelCase : Optional[int] = int(factor * num_images )
_lowerCamelCase : str = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 , )
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : str = 0
_lowerCamelCase : Any = tqdm(desc="downloading real regularization images" , total=_lowercase )
with open(F'{class_data_dir}/caption.txt' , "w" ) as fa, open(F'{class_data_dir}/urls.txt' , "w" ) as fa, open(
F'{class_data_dir}/images.txt' , "w" ) as fa:
while total < num_class_images:
_lowerCamelCase : Optional[Any] = class_images[count]
count += 1
try:
_lowerCamelCase : str = requests.get(images["url"] )
if img.status_code == 200:
_lowerCamelCase : int = Image.open(BytesIO(img.content ) )
with open(F'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(F'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = argparse.ArgumentParser("" , add_help=_lowercase )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=_lowercase , type=_lowercase )
parser.add_argument("--class_data_dir" , help="path to save images" , required=_lowercase , type=_lowercase )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=_lowercase )
return parser.parse_args()
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
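A small sketch of the row slicing used above to cut a stacked qkv/in_proj matrix into query, key and value weights; dim=4 is an arbitrary toy size.
import torch

dim = 4  # toy hidden size, assumed for illustration only
in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : dim * 2, :]
v_w = in_proj_weight[-dim:, :]
# concatenating the slices reproduces the stacked matrix, confirming the layout
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)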
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=8 ):
"""simple docstring"""
_lowerCamelCase : Tuple = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class UpperCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any],__A : UNetaDConditionModel,__A : DDPMScheduler,__A : VQModel,):
super().__init__()
self.register_modules(
unet=__lowerCAmelCase,scheduler=__lowerCAmelCase,movq=__lowerCAmelCase,)
_lowerCamelCase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase_ ( self : str,__A : Optional[Any],__A : Optional[Any],__A : List[str],__A : Optional[Any],__A : Tuple,__A : Dict ):
if latents is None:
_lowerCamelCase : Union[str, Any] = randn_tensor(__lowerCAmelCase,generator=__lowerCAmelCase,device=__lowerCAmelCase,dtype=__lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : List[Any] = latents.to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : Any,__A : Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : Dict = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase,__lowerCAmelCase )
def lowerCamelCase_ ( self : Dict,__A : Any=0 ):
if is_accelerate_available() and is_accelerate_version(">=","0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu",silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase , _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase,__lowerCAmelCase,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase_ ( self : List[Any] ):
if not hasattr(self.unet,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : List[str],__A : Union[torch.FloatTensor, List[torch.FloatTensor]],__A : Union[torch.FloatTensor, List[torch.FloatTensor]],__A : int = 5_1_2,__A : int = 5_1_2,__A : int = 1_0_0,__A : float = 4.0,__A : int = 1,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : Optional[str] = "pil",__A : bool = True,):
_lowerCamelCase : Dict = self._execution_device
_lowerCamelCase : List[Any] = guidance_scale > 1.0
if isinstance(__lowerCAmelCase,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = torch.cat(__lowerCAmelCase,dim=0 )
_lowerCamelCase : int = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__lowerCAmelCase,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = torch.cat(__lowerCAmelCase,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase,dim=0 )
_lowerCamelCase : Any = negative_image_embeds.repeat_interleave(__lowerCAmelCase,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds],dim=0 ).to(dtype=self.unet.dtype,device=__lowerCAmelCase )
self.scheduler.set_timesteps(__lowerCAmelCase,device=__lowerCAmelCase )
_lowerCamelCase : List[str] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.unet.config.in_channels
_lowerCamelCase , _lowerCamelCase : List[str] = downscale_height_and_width(__lowerCAmelCase,__lowerCAmelCase,self.movq_scale_factor )
# create initial latent
_lowerCamelCase : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width),image_embeds.dtype,__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,self.scheduler,)
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Optional[int] = {"image_embeds": image_embeds}
_lowerCamelCase : int = self.unet(
sample=__lowerCAmelCase,timestep=__lowerCAmelCase,encoder_hidden_states=__lowerCAmelCase,added_cond_kwargs=__lowerCAmelCase,return_dict=__lowerCAmelCase,)[0]
if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1],dim=1 )
_lowerCamelCase , _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase , _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text],dim=1 )
if not (
hasattr(self.scheduler.config,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1],dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Any = self.scheduler.step(
__lowerCAmelCase,__lowerCAmelCase,__lowerCAmelCase,generator=__lowerCAmelCase,)[0]
# post-processing
_lowerCamelCase : Any = self.movq.decode(__lowerCAmelCase,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_lowerCamelCase : int = image * 0.5 + 0.5
_lowerCamelCase : List[str] = image.clamp(0,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0,2,3,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : Optional[Any] = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase ) | 709 |
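A standalone restatement of the height/width rounding done by the helper at the top of the pipeline above; the sample sizes 768 and 700 are arbitrary.
def latent_size(size: int, scale_factor: int = 8) -> int:
    # pixel size -> latent size: roughly size / scale_factor, rounded up so the
    # result stays a multiple of scale_factor (8 matches the movq default above)
    new_size = size // scale_factor**2
    if size % scale_factor**2 != 0:
        new_size += 1
    return new_size * scale_factor

assert latent_size(768) == 96  # 768 is already a multiple of 64
assert latent_size(700) == 88  # ceil(700 / 64) * 8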
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__UpperCAmelCase )
class UpperCAmelCase__ ( __UpperCAmelCase ):
lowerCAmelCase_ = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowerCAmelCase_ = Features({'text': Value('string' )} )
lowerCAmelCase_ = Features({'labels': ClassLabel} )
lowerCAmelCase_ = "text"
lowerCAmelCase_ = "labels"
def lowerCamelCase_ ( self : Dict,__A : int ):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column],UpperCAmelCase_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
_lowerCamelCase : Dict = copy.deepcopy(self )
_lowerCamelCase : List[Any] = self.label_schema.copy()
_lowerCamelCase : str = features[self.label_column]
_lowerCamelCase : Optional[Any] = label_schema
return task_template
@property
def lowerCamelCase_ ( self : Optional[Any] ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 710 |
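For context, a hedged sketch of the feature schema the task template above aligns against, built with the public `datasets` feature types; the label names are invented, and the upstream method name `align_with_features` is an assumption.
from datasets import ClassLabel, Features, Value

# invented labels, purely for illustration
features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# the alignment method above deep-copies the template and stores features["labels"]
# in its label schema, so the class names travel with the task definition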
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
UpperCAmelCase_ : Optional[int] = True
except ImportError:
UpperCAmelCase_ : str = False
try:
from torch.hub import _get_torch_home
UpperCAmelCase_ : int = _get_torch_home()
except ImportError:
UpperCAmelCase_ : Dict = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
UpperCAmelCase_ : List[Any] = os.path.join(torch_cache_home, 'transformers')
UpperCAmelCase_ : Optional[int] = 'https://cdn.huggingface.co'
UpperCAmelCase_ : Tuple = 'https://s3.amazonaws.com/models.huggingface.co/bert'
UpperCAmelCase_ : List[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
UpperCAmelCase_ : str = os.path.join(PATH, 'config.yaml')
UpperCAmelCase_ : Optional[Any] = os.path.join(PATH, 'attributes.txt')
UpperCAmelCase_ : Any = os.path.join(PATH, 'objects.txt')
UpperCAmelCase_ : str = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
UpperCAmelCase_ : List[Any] = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
UpperCAmelCase_ : str = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
UpperCAmelCase_ : int = 'pytorch_model.bin'
UpperCAmelCase_ : Any = 'config.yaml'
def A_ ( _lowerCAmelCase : Tuple=OBJECTS , _lowerCAmelCase : int=ATTRIBUTES ):
"""simple docstring"""
_lowerCamelCase : Tuple = []
with open(_lowercase ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
_lowerCamelCase : Optional[Any] = []
with open(_lowercase ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = OrderedDict()
with open(_lowercase , "rb" ) as f:
_lowerCamelCase : List[Any] = pkl.load(_lowercase )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
_lowerCamelCase : Dict = ckp.pop(_lowercase )
if isinstance(_lowercase , np.ndarray ):
_lowerCamelCase : Optional[int] = torch.tensor(_lowercase )
else:
            assert isinstance(_lowercase , torch.Tensor ), type(_lowercase )
_lowerCamelCase : Optional[Any] = v
return r
class UpperCAmelCase__ :
lowerCAmelCase_ = {}
def __init__( self : List[Any],__A : dict,__A : str = "root",__A : Dict=0 ):
_lowerCamelCase : Dict = name
_lowerCamelCase : Tuple = level
_lowerCamelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_lowerCamelCase : Optional[Any] = copy.deepcopy(UpperCamelCase__ )
_lowerCamelCase : Optional[int] = copy.deepcopy(UpperCamelCase__ )
if isinstance(UpperCamelCase__,UpperCamelCase__ ):
_lowerCamelCase : Optional[Any] = Config(UpperCamelCase__,name=UpperCamelCase__,level=level + 1 )
_lowerCamelCase : Optional[int] = v
setattr(self,UpperCamelCase__,UpperCamelCase__ )
_lowerCamelCase : Tuple = d
def __repr__( self : List[Any] ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : int,__A : Union[str, Any],__A : Dict ):
_lowerCamelCase : Optional[int] = val
_lowerCamelCase : Optional[Any] = val
_lowerCamelCase : Optional[int] = key.split("." )
_lowerCamelCase : List[str] = len(UpperCamelCase__ ) - 1
_lowerCamelCase : List[str] = self._pointer
if len(UpperCamelCase__ ) > 1:
for i, l in enumerate(UpperCamelCase__ ):
if hasattr(self,UpperCamelCase__ ) and isinstance(getattr(self,UpperCamelCase__ ),UpperCamelCase__ ):
setattr(getattr(self,UpperCamelCase__ ),".".join(levels[i:] ),UpperCamelCase__ )
if l == last_level:
_lowerCamelCase : Tuple = val
else:
_lowerCamelCase : Any = pointer[l]
def lowerCamelCase_ ( self : List[Any] ):
return self._pointer
def lowerCamelCase_ ( self : str,__A : Optional[int],__A : Optional[Any] ):
with open(f'{file_name}',"w" ) as stream:
dump(UpperCamelCase__,UpperCamelCase__ )
def lowerCamelCase_ ( self : Any,__A : List[Any],__A : List[str] ):
with open(f'{file_name}',"w" ) as stream:
json.dump(UpperCamelCase__,UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( __A : Dict ):
with open(UpperCamelCase__ ) as stream:
_lowerCamelCase : Tuple = load(UpperCamelCase__,Loader=UpperCamelCase__ )
return data
def __str__( self : Tuple ):
_lowerCamelCase : Dict = ''' '''
if self._name != "root":
_lowerCamelCase : Optional[int] = f'{t * (self._level-1)}{self._name}:\n'
else:
_lowerCamelCase : int = ''''''
_lowerCamelCase : Optional[int] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCamelCase__,UpperCamelCase__ ):
r += f'{t * (self._level)}{v}\n'
self._level += 1
else:
r += f'{t * (self._level)}{k}: {v} ({type(UpperCamelCase__ ).__name__})\n'
_lowerCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def lowerCamelCase_ ( cls : Dict,__A : str,**__A : List[Any] ):
_lowerCamelCase : List[Any] = cls.get_config_dict(UpperCamelCase__,**UpperCamelCase__ )
return cls(UpperCamelCase__ )
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : str,**__A : str ):
_lowerCamelCase : Dict = kwargs.pop("cache_dir",UpperCamelCase__ )
_lowerCamelCase : str = kwargs.pop("force_download",UpperCamelCase__ )
_lowerCamelCase : Tuple = kwargs.pop("resume_download",UpperCamelCase__ )
_lowerCamelCase : List[str] = kwargs.pop("proxies",UpperCamelCase__ )
_lowerCamelCase : Tuple = kwargs.pop("local_files_only",UpperCamelCase__ )
if os.path.isdir(UpperCamelCase__ ):
_lowerCamelCase : Optional[Any] = os.path.join(UpperCamelCase__,UpperCamelCase__ )
elif os.path.isfile(UpperCamelCase__ ) or is_remote_url(UpperCamelCase__ ):
_lowerCamelCase : Any = pretrained_model_name_or_path
else:
_lowerCamelCase : Optional[Any] = hf_bucket_url(UpperCamelCase__,filename=UpperCamelCase__,use_cdn=UpperCamelCase__ )
try:
# Load from URL or cache if already cached
_lowerCamelCase : Optional[Any] = cached_path(
UpperCamelCase__,cache_dir=UpperCamelCase__,force_download=UpperCamelCase__,proxies=UpperCamelCase__,resume_download=UpperCamelCase__,local_files_only=UpperCamelCase__,)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_lowerCamelCase : List[Any] = Config.load_yaml(UpperCamelCase__ )
except EnvironmentError:
_lowerCamelCase : Dict = '''Can\'t load config for'''
raise EnvironmentError(UpperCamelCase__ )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(UpperCamelCase__ ), kwargs
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = torch.load("dump.pt" , map_location=in_tensor.device )
_lowerCamelCase : Tuple = in_tensor.numpy()
_lowerCamelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_lowercase , _lowercase , rtol=0.0_1 , atol=0.1 ), (
F'{sum([1 for x in np.isclose(_lowercase , _lowercase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[str] = urlparse(_lowercase )
return parsed.scheme in ("http", "https")
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : str=True ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_lowerCamelCase : Optional[int] = '''/''' not in model_id
if legacy_format:
return F'{endpoint}/{model_id}-{filename}'
else:
return F'{endpoint}/{model_id}/{filename}'
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[int]=0 , _lowerCAmelCase : Dict=None , ):
"""simple docstring"""
_lowerCamelCase : List[str] = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_lowercase , _lowercase ):
ua += "; " + "; ".join("{}/{}".format(_lowercase , _lowercase ) for k, v in user_agent.items() )
elif isinstance(_lowercase , _lowercase ):
ua += "; " + user_agent
_lowerCamelCase : List[str] = {'''user-agent''': ua}
if resume_size > 0:
_lowerCamelCase : Dict = '''bytes=%d-''' % (resume_size,)
_lowerCamelCase : Tuple = requests.get(_lowercase , stream=_lowercase , proxies=_lowercase , headers=_lowercase )
if response.status_code == 416: # Range not satisfiable
return
_lowerCamelCase : Dict = response.headers.get("Content-Length" )
_lowerCamelCase : List[Any] = resume_size + int(_lowercase ) if content_length is not None else None
_lowerCamelCase : Optional[Any] = tqdm(
unit="B" , unit_scale=_lowercase , total=_lowercase , initial=_lowercase , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_lowercase ) )
temp_file.write(_lowercase )
progress.close()
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=10 , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Any=False , ):
"""simple docstring"""
if cache_dir is None:
_lowerCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(_lowercase , _lowercase ):
_lowerCamelCase : Optional[Any] = str(_lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
_lowerCamelCase : str = None
if not local_files_only:
try:
_lowerCamelCase : int = requests.head(_lowercase , allow_redirects=_lowercase , proxies=_lowercase , timeout=_lowercase )
if response.status_code == 200:
_lowerCamelCase : List[Any] = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_lowerCamelCase : Optional[int] = url_to_filename(_lowercase , _lowercase )
# get cache path to put the file
_lowerCamelCase : Union[str, Any] = os.path.join(_lowercase , _lowercase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_lowercase ):
return cache_path
else:
_lowerCamelCase : Any = [
file
for file in fnmatch.filter(os.listdir(_lowercase ) , filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(_lowercase ) > 0:
return os.path.join(_lowercase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set \'local_files_only\'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(_lowercase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_lowerCamelCase : List[str] = cache_path + '''.lock'''
with FileLock(_lowercase ):
# If the download just completed while the lock was activated.
if os.path.exists(_lowercase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_lowerCamelCase : int = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(_lowercase , "a+b" ) as f:
yield f
_lowerCamelCase : List[Any] = _resumable_file_manager
if os.path.exists(_lowercase ):
_lowerCamelCase : List[Any] = os.stat(_lowercase ).st_size
else:
_lowerCamelCase : Dict = 0
else:
_lowerCamelCase : Any = partial(tempfile.NamedTemporaryFile , dir=_lowercase , delete=_lowercase )
_lowerCamelCase : List[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" , _lowercase , temp_file.name , )
http_get(
_lowercase , _lowercase , proxies=_lowercase , resume_size=_lowercase , user_agent=_lowercase , )
os.replace(temp_file.name , _lowercase )
_lowerCamelCase : Optional[int] = {'''url''': url, '''etag''': etag}
_lowerCamelCase : int = cache_path + '''.json'''
with open(_lowercase , "w" ) as meta_file:
json.dump(_lowercase , _lowercase )
return cache_path
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int=None ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = url.encode("utf-8" )
_lowerCamelCase : Optional[Any] = shaaaa(_lowercase )
_lowerCamelCase : Optional[Any] = url_hash.hexdigest()
if etag:
_lowerCamelCase : Any = etag.encode("utf-8" )
_lowerCamelCase : Dict = shaaaa(_lowercase )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : str=None , _lowerCAmelCase : str=False , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : str=False , ):
"""simple docstring"""
if cache_dir is None:
_lowerCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(_lowercase , _lowercase ):
_lowerCamelCase : int = str(_lowercase )
if isinstance(_lowercase , _lowercase ):
_lowerCamelCase : int = str(_lowercase )
if is_remote_url(_lowercase ):
# URL, so get it from the cache (downloading if necessary)
_lowerCamelCase : List[Any] = get_from_cache(
_lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , user_agent=_lowercase , local_files_only=_lowercase , )
elif os.path.exists(_lowercase ):
# File, and it exists.
_lowerCamelCase : Optional[Any] = url_or_filename
elif urlparse(_lowercase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(_lowercase ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(_lowercase ) )
if extract_compressed_file:
if not is_zipfile(_lowercase ) and not tarfile.is_tarfile(_lowercase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_lowerCamelCase : str = os.path.split(_lowercase )
_lowerCamelCase : Any = output_file.replace("." , "-" ) + '''-extracted'''
_lowerCamelCase : List[str] = os.path.join(_lowercase , _lowercase )
if os.path.isdir(_lowercase ) and os.listdir(_lowercase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_lowerCamelCase : int = output_path + '''.lock'''
with FileLock(_lowercase ):
shutil.rmtree(_lowercase , ignore_errors=_lowercase )
os.makedirs(_lowercase )
if is_zipfile(_lowercase ):
with ZipFile(_lowercase , "r" ) as zip_file:
zip_file.extractall(_lowercase )
zip_file.close()
elif tarfile.is_tarfile(_lowercase ):
_lowerCamelCase : Dict = tarfile.open(_lowercase )
tar_file.extractall(_lowercase )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(_lowercase ) )
return output_path_extracted
return output_path
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any]="," ):
"""simple docstring"""
assert isinstance(_lowercase , _lowercase )
if os.path.isfile(_lowercase ):
with open(_lowercase ) as f:
_lowerCamelCase : Union[str, Any] = eval(f.read() )
else:
_lowerCamelCase : Tuple = requests.get(_lowercase )
try:
_lowerCamelCase : str = requests.json()
except Exception:
_lowerCamelCase : Tuple = req.content.decode()
assert data is not None, "could not connect"
try:
_lowerCamelCase : List[str] = eval(_lowercase )
except Exception:
_lowerCamelCase : Optional[int] = data.split("\n" )
req.close()
return data
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = requests.get(_lowercase )
_lowerCamelCase : Optional[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_lowercase )
with open(_lowercase , "rb" ) as stream:
_lowerCamelCase : int = pkl.load(_lowercase )
_lowerCamelCase : str = weights.pop("model" )
_lowerCamelCase : Union[str, Any] = {}
for k, v in model.items():
_lowerCamelCase : List[Any] = torch.from_numpy(_lowercase )
if "running_var" in k:
_lowerCamelCase : Optional[Any] = torch.tensor([0] )
_lowerCamelCase : int = k.replace("running_var" , "num_batches_tracked" )
_lowerCamelCase : int = zero
return new
def A_ ( ):
"""simple docstring"""
print(F'{os.path.abspath(os.path.join(_lowercase , os.pardir ) )}/demo.ipynb' )
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]="RGB" ):
"""simple docstring"""
assert isinstance(_lowercase , _lowercase )
if os.path.isfile(_lowercase ):
_lowerCamelCase : Any = cva.imread(_lowercase )
else:
_lowerCamelCase : Union[str, Any] = get_image_from_url(_lowercase )
assert img is not None, F'could not connect to: {im}'
_lowerCamelCase : int = cva.cvtColor(_lowercase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_lowerCamelCase : Optional[Any] = img[:, :, ::-1]
return img
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int]=1 ):
"""simple docstring"""
return (images[i : i + batch] for i in range(0 , len(_lowercase ) , _lowercase )) | 711 |
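A minimal sketch of the cache-filename scheme used by `url_to_filename` above; the URL and ETag values are placeholders.
from hashlib import sha256

url = "https://example.com/model.bin"  # placeholder URL
etag = '"abc123"'                      # placeholder ETag
filename = sha256(url.encode("utf-8")).hexdigest()
if etag:
    filename += "." + sha256(etag.encode("utf-8")).hexdigest()
print(filename)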
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=A )
class UpperCAmelCase__ ( A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCAmelCase_ = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowerCAmelCase_ = Features({'text': Value('string' )} )
lowerCAmelCase_ = Features({'labels': ClassLabel} )
lowerCAmelCase_ = 'text'
lowerCAmelCase_ = 'labels'
def lowerCamelCase_ ( self : Optional[Any],__A : Any ):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column],_SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
_lowerCamelCase : int = copy.deepcopy(self )
_lowerCamelCase : str = self.label_schema.copy()
_lowerCamelCase : Optional[Any] = features[self.label_column]
_lowerCamelCase : Tuple = label_schema
return task_template
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return {
self.text_column: "text",
self.label_column: "labels",
} | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase_ = ''
lowerCAmelCase_ = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Any,__A : Optional[DatasetInfo] = None,__A : Optional[str] = None,**__A : Union[str, Any],):
super().__init__(self,**__A )
_lowerCamelCase : str = repo_info
_lowerCamelCase : Union[str, Any] = token
_lowerCamelCase : int = None
def lowerCamelCase_ ( self : Optional[int] ):
if self.dir_cache is None:
_lowerCamelCase : List[str] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCamelCase : Tuple = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__A ): {"name": str(__A ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self : List[Any],__A : str,__A : str = "rb",**__A : str,):
if not isinstance(self.repo_info,__A ):
raise NotImplementedError(f'Open is only implemented for dataset repositories, but got {self.repo_info}' )
_lowerCamelCase : Optional[Any] = hf_hub_url(self.repo_info.id,__A,revision=self.repo_info.sha )
return fsspec.open(
__A,mode=__A,headers=get_authentication_headers_for_url(__A,use_auth_token=self.token ),client_kwargs={"trust_env": True},).open()
def lowerCamelCase_ ( self : Any,__A : Tuple,**__A : int ):
self._get_dirs()
_lowerCamelCase : str = self._strip_protocol(__A )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__A )
def lowerCamelCase_ ( self : List[str],__A : Tuple,__A : Tuple=False,**__A : Optional[Any] ):
self._get_dirs()
_lowerCamelCase : int = PurePosixPath(path.strip("/" ) )
_lowerCamelCase : Optional[Any] = {}
for p, f in self.dir_cache.items():
_lowerCamelCase : int = PurePosixPath(p.strip("/" ) )
_lowerCamelCase : Any = p.parent
if root == path:
_lowerCamelCase : Optional[Any] = f
_lowerCamelCase : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
	print(bisection(0, 6))
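
# For reference, a sketch of the same bisection idea with descriptive names. It is not part of
# the snippet above; the function name, the tolerance, and the demo call are illustrative
# assumptions.
def find_root(f, low: float, high: float, tolerance: float = 0.01) -> float:
    # f(low) and f(high) must bracket a root, i.e. have opposite signs.
    if f(low) * f(high) >= 0:
        raise ValueError("f(low) and f(high) must have opposite signs")
    while (high - low) >= tolerance:
        mid = (low + high) / 2
        if f(mid) == 0.0:
            return mid
        if f(low) * f(mid) < 0:
            high = mid
        else:
            low = mid
    return (low + high) / 2

print(find_root(lambda x: 10 - x * x, 0, 6))  # roughly 3.162, the positive root of 10 - x*x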
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ : Dict = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
UpperCAmelCase_ : Optional[int] = '▁'
class UpperCAmelCase__ ( lowercase__ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any],__A : List[Any],__A : List[str]="</s>",__A : str="<unk>",__A : List[Any]="<pad>",__A : Optional[Any]=1_0_0,__A : List[Any]=None,__A : Optional[Dict[str, Any]] = None,__A : Optional[Any]=True,**__A : Optional[Any],):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowerCamelCase : Any = [f'<extra_id_{i}>' for i in range(__lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCamelCase : Dict = len(set(filter(lambda __A : bool("extra_id" in str(__lowercase ) ),__lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_lowerCamelCase : int = legacy
_lowerCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowercase,unk_token=__lowercase,pad_token=__lowercase,extra_ids=__lowercase,additional_special_tokens=__lowercase,sp_model_kwargs=self.sp_model_kwargs,legacy=__lowercase,**__lowercase,)
_lowerCamelCase : str = vocab_file
_lowerCamelCase : Optional[int] = extra_ids
_lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
@staticmethod
def lowerCamelCase_ ( __A : List[Any],__A : Union[str, Any],__A : str ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_lowerCamelCase : int = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value.",__lowercase,)
return max_model_length
@property
def lowerCamelCase_ ( self : List[str] ):
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : List[int],__A : Optional[List[int]] = None,__A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase,token_ids_a=__lowercase,already_has_special_tokens=__lowercase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__lowercase )) + [1]
return ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1]
def lowerCamelCase_ ( self : Union[str, Any] ):
return list(
set(filter(lambda __A : bool(re.search(r"<extra_id_\d+>",__lowercase ) ) is not None,self.additional_special_tokens ) ) )
def lowerCamelCase_ ( self : str ):
return [self._convert_token_to_id(__lowercase ) for token in self.get_sentinel_tokens()]
def lowerCamelCase_ ( self : Any,__A : List[int] ):
if len(__lowercase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase_ ( self : Union[str, Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase_ ( self : str,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = self._add_eos_if_not_present(__lowercase )
if token_ids_a is None:
return token_ids_a
else:
_lowerCamelCase : str = self._add_eos_if_not_present(__lowercase )
return token_ids_a + token_ids_a
def __getstate__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = self.__dict__.copy()
_lowerCamelCase : Optional[int] = None
return state
def __setstate__( self : Dict,__A : List[str] ):
_lowerCamelCase : Any = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : Dict = {}
_lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : List[str],__A : "TextInput",**__A : str ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_lowerCamelCase : int = SPIECE_UNDERLINE + text.replace(__lowercase," " )
return super().tokenize(__lowercase,**__lowercase )
def lowerCamelCase_ ( self : Union[str, Any],__A : Optional[Any],**__A : List[Any] ):
if not self.legacy:
_lowerCamelCase : Any = text.startswith(__lowercase )
if is_first:
_lowerCamelCase : Union[str, Any] = text[1:]
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__lowercase,out_type=__lowercase )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(__lowercase ):
_lowerCamelCase : str = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowerCamelCase_ ( self : Tuple,__A : Tuple ):
if token.startswith("<extra_id_" ):
_lowerCamelCase : List[Any] = re.match(r"<extra_id_(\d+)>",__lowercase )
_lowerCamelCase : Dict = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__lowercase )
def lowerCamelCase_ ( self : Optional[Any],__A : Optional[int] ):
if index < self.sp_model.get_piece_size():
_lowerCamelCase : Optional[Any] = self.sp_model.IdToPiece(__lowercase )
else:
_lowerCamelCase : List[str] = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def lowerCamelCase_ ( self : Optional[int],__A : Tuple ):
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowercase ) + token
_lowerCamelCase : List[str] = True
_lowerCamelCase : str = []
else:
current_sub_tokens.append(__lowercase )
_lowerCamelCase : Tuple = False
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
def lowerCamelCase_ ( self : List[str],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__lowercase,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase,"wb" ) as fi:
_lowerCamelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
		return (out_vocab_file,)
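
# The class above mirrors the stock SentencePiece-based T5 tokenizer from transformers. A rough
# usage sketch with the upstream API (a model download is required; "t5-small" is only an example
# checkpoint):
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
ids = tokenizer("translate English to German: The house is wonderful.").input_ids
print(tokenizer.decode(ids))  # prints the text back, plus the trailing </s> special token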
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
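
# A minimal sketch of the exact-deduplication step used above: hash the whitespace-stripped
# content and keep only the first example per hash. The toy strings below are illustrative.
import hashlib
import re

def content_hash(text: str) -> str:
    return hashlib.md5(re.sub(r"\s+", "", text).encode("utf-8")).hexdigest()

samples = ["def f():\n    return 1", "def f():  \n\treturn 1", "def g(): pass"]
seen_hashes = set()
unique_samples = []
for sample in samples:
    digest = content_hash(sample)
    if digest not in seen_hashes:
        seen_hashes.add(digest)
        unique_samples.append(sample)
print(len(unique_samples))  # 2 -- the first two samples differ only in whitespace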
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCAmelCase_ : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase__ ( __snake_case ):
def __init__( self : Any,*__A : Optional[int],__A : Tuple=None,__A : Optional[Any]=None,__A : Dict=None,**__A : int ):
super().__init__(*A_,**A_ )
_lowerCamelCase : Tuple = eval_examples
_lowerCamelCase : Dict = post_process_function
_lowerCamelCase : Union[str, Any] = quant_trainer_args
_lowerCamelCase : Any = 1_2_8 # default number of calibration samples
def lowerCamelCase_ ( self : List[Any],__A : List[str]=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset." )
_lowerCamelCase : Any = calib_dataset if calib_dataset is not None else self.calib_dataset
_lowerCamelCase : Dict = self._remove_unused_columns(A_,description="Calibration" )
return DataLoader(
A_,batch_size=self.args.eval_batch_size,collate_fn=self.data_collator,drop_last=self.args.dataloader_drop_last,num_workers=self.args.dataloader_num_workers,pin_memory=self.args.dataloader_pin_memory,shuffle=A_,)
def lowerCamelCase_ ( self : str,__A : Tuple=None ):
_lowerCamelCase : Any = self.train_dataset if calib_dataset is None else calib_dataset
_lowerCamelCase : Dict = self.get_calib_dataloader(A_ )
_lowerCamelCase : Optional[int] = self.model
quant_trainer.configure_model(A_,self.quant_trainer_args,calib=A_ )
model.eval()
quant_trainer.enable_calibration(A_ )
logger.info("***** Running calibration *****" )
logger.info(f' Num examples = {self.calib_num}' )
logger.info(f' Batch size = {calib_dataloader.batch_size}' )
for step, inputs in enumerate(A_ ):
# Prediction step
_lowerCamelCase : List[Any] = self.prediction_step(A_,A_,prediction_loss_only=A_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A_,self.quant_trainer_args )
_lowerCamelCase : str = model
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any]=None,__A : Dict=None,__A : str=None,__A : List[Any] = "eval" ):
_lowerCamelCase : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
_lowerCamelCase : Any = self.get_eval_dataloader(A_ )
_lowerCamelCase : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_lowerCamelCase : List[Any] = self.compute_metrics
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowerCamelCase : int = eval_loop(
A_,description="Evaluation",prediction_loss_only=True if compute_metrics is None else None,ignore_keys=A_,)
finally:
_lowerCamelCase : Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_lowerCamelCase : List[str] = self.post_process_function(A_,A_,output.predictions )
_lowerCamelCase : Dict = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
_lowerCamelCase : Optional[int] = metrics.pop(A_ )
self.log(A_ )
else:
_lowerCamelCase : List[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_lowerCamelCase : Tuple = self.callback_handler.on_evaluate(self.args,self.state,self.control,A_ )
return metrics
def lowerCamelCase_ ( self : Any,__A : List[Any],__A : List[str],__A : List[str]=None,__A : List[Any] = "test" ):
_lowerCamelCase : Optional[int] = self.get_test_dataloader(A_ )
# Temporarily disable metric computation, we will do it in the loop here.
_lowerCamelCase : int = self.compute_metrics
_lowerCamelCase : Tuple = None
_lowerCamelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowerCamelCase : Any = eval_loop(
A_,description="Prediction",prediction_loss_only=True if compute_metrics is None else None,ignore_keys=A_,)
finally:
_lowerCamelCase : Dict = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_lowerCamelCase : List[str] = self.post_process_function(A_,A_,output.predictions,"predict" )
_lowerCamelCase : Optional[Any] = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
_lowerCamelCase : Tuple = metrics.pop(A_ )
return PredictionOutput(predictions=predictions.predictions,label_ids=predictions.label_ids,metrics=A_ )
def lowerCamelCase_ ( self : List[Any],__A : Any="./" ):
_lowerCamelCase : str = self.eval_dataset
_lowerCamelCase : Dict = self.get_eval_dataloader(A_ )
_lowerCamelCase : List[str] = next(iter(A_ ) )
# saving device - to make it consistent
_lowerCamelCase : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
_lowerCamelCase : Union[str, Any] = tuple(v.to(A_ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
_lowerCamelCase : str = True
_lowerCamelCase : int = self.model.to(A_ )
model.eval()
model.float()
_lowerCamelCase : Tuple = model.module if hasattr(A_,"module" ) else model
quant_trainer.configure_model(A_,self.quant_trainer_args )
_lowerCamelCase : int = os.path.join(A_,"model.onnx" )
logger.info(f'exporting model to {output_model_file}' )
_lowerCamelCase : List[Any] = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
A_,A_,A_,export_params=A_,opset_version=1_3,do_constant_folding=A_,input_names=["input_ids", "attention_mask", "token_type_ids"],output_names=["output_start_logits", "output_end_logits"],dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
},verbose=A_,)
logger.info("onnx export finished" )
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
		return inputs
'''simple docstring'''
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase__ ( UpperCamelCase__ ):
lowerCAmelCase_ = """facebook/bart-large-mnli"""
lowerCAmelCase_ = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
lowerCAmelCase_ = """text_classifier"""
lowerCAmelCase_ = AutoTokenizer
lowerCAmelCase_ = AutoModelForSequenceClassification
lowerCAmelCase_ = ["""text""", ["""text"""]]
lowerCAmelCase_ = ["""text"""]
def lowerCamelCase_ ( self : List[Any] ):
super().setup()
_lowerCamelCase : Dict = self.model.config
_lowerCamelCase : Tuple = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
_lowerCamelCase : List[Any] = int(__A )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def lowerCamelCase_ ( self : str,__A : Union[str, Any],__A : Dict ):
_lowerCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__A ),[f'This example is {label}' for label in labels],return_tensors="pt",padding="max_length",)
def lowerCamelCase_ ( self : Tuple,__A : Dict ):
_lowerCamelCase : Tuple = outputs.logits
_lowerCamelCase : Optional[Any] = torch.argmax(logits[:, 2] ).item()
		return self._labels[label_id]
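
# The tool above scores one "This example is {label}" hypothesis per candidate label with an NLI
# model and returns the label whose entailment logit is highest. A rough equivalent using the
# standard transformers pipeline (a sketch; it downloads the facebook/bart-large-mnli checkpoint):
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("I loved this movie!", candidate_labels=["positive", "negative"])
print(result["labels"][0])  # the most likely label, e.g. "positive"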
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
		return (out_vocab_file,)
'''simple docstring'''
from itertools import count
def A_ ( _lowerCAmelCase : Optional[int] = 50 ):
"""simple docstring"""
_lowerCamelCase : int = [1] * min_block_length
for n in count(__lowerCAmelCase ):
fill_count_functions.append(1 )
for block_length in range(__lowerCAmelCase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1000000:
break
return n
if __name__ == "__main__":
	print(f'''{solution() = }''')
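
# A reference sketch of the same counting recurrence with descriptive names: count the ways to
# fill a row of length n with blocks of at least `min_block_length` cells separated by gaps, and
# return the first n for which that count exceeds `limit`. It mirrors the loop structure above;
# the names and the smaller demo arguments are illustrative.
from itertools import count as count_from

def first_length_exceeding(min_block_length: int = 50, limit: int = 1_000_000) -> int:
    fill_counts = [1] * min_block_length  # rows shorter than one block admit only the empty fill
    for n in count_from(min_block_length):
        fill_counts.append(1)  # the all-empty row of length n
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # a block at `block_start` plus one mandatory gap cell; the rest is a smaller row
                fill_counts[n] += fill_counts[n - block_start - block_length - 1]
            fill_counts[n] += 1  # the block placed flush against the right edge
        if fill_counts[n] > limit:
            return n

print(first_length_exceeding(3, 1_000))  # smaller demo: minimum block length 3, threshold 1000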
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
		return self.encode(text=__A )
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
UpperCAmelCase_ : Dict = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self : Tuple,__A : Optional[Any],__A : Dict=None ):
_lowerCamelCase : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self,lowerCAmelCase_,getattr(lowerCAmelCase_,lowerCAmelCase_ ) )
_lowerCamelCase : str = module._original_module if isinstance(lowerCAmelCase_,_PatchedModuleObj ) else module
class UpperCAmelCase__ :
lowerCAmelCase_ = []
def __init__( self : Tuple,__A : str,__A : Dict,__A : List[str],__A : str=None ):
_lowerCamelCase : int = obj
_lowerCamelCase : Optional[int] = target
_lowerCamelCase : str = new
_lowerCamelCase : List[Any] = target.split("." )[0]
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = attrs or []
def __enter__( self : List[Any] ):
*_lowerCamelCase , _lowerCamelCase : List[Any] = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase_ ) ):
try:
_lowerCamelCase : Tuple = import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_lowerCamelCase : Optional[int] = getattr(self.obj,lowerCAmelCase_ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase_,_PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_lowerCamelCase : Optional[Any] = obj_attr
# patch at top level
setattr(self.obj,lowerCAmelCase_,_PatchedModuleObj(lowerCAmelCase_,attrs=self.attrs ) )
_lowerCamelCase : Optional[Any] = getattr(self.obj,lowerCAmelCase_ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase_,lowerCAmelCase_,_PatchedModuleObj(getattr(lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_ ),attrs=self.attrs ) )
_lowerCamelCase : Optional[int] = getattr(lowerCAmelCase_,lowerCAmelCase_ )
# finally set the target attribute
setattr(lowerCAmelCase_,lowerCAmelCase_,self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_lowerCamelCase : Optional[Any] = getattr(import_module(".".join(lowerCAmelCase_ ) ),lowerCAmelCase_ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj,lowerCAmelCase_ ) is attr_value:
_lowerCamelCase : Union[str, Any] = getattr(self.obj,lowerCAmelCase_ )
setattr(self.obj,lowerCAmelCase_,self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
_lowerCamelCase : Any = globals()["__builtins__"][target_attr]
setattr(self.obj,lowerCAmelCase_,self.new )
else:
raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self : Optional[Any],*__A : Dict ):
for attr in list(self.original ):
setattr(self.obj,lowerCAmelCase_,self.original.pop(lowerCAmelCase_ ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.__enter__()
self._active_patches.append(self )
def lowerCamelCase_ ( self : Union[str, Any] ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
	print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))