# --- transformers tests: MobileNetV2 (test_modeling_mobilenet_v2.py) ---

import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_check_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
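

# --- Hedged usage sketch (added for illustration; not part of the original test file) ---
# The integration tests above mirror the inference pattern a user would write by
# hand. A minimal standalone version of the image-classification path, assuming
# the same "google/mobilenet_v2_1.0_224" checkpoint used in the tests and a
# hypothetical local image path:
def _example_mobilenet_v2_inference(image_path="cat.png"):
    import torch
    from PIL import Image
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

    processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, 1001): 1000 ImageNet classes + background
    return model.config.id2label[int(logits.argmax(-1))]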


# --- transformers configuration: Swin (configuration_swin.py) ---

from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
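

# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# SwinConfig derives `hidden_size` and `stage_names` in __init__, and
# `attribute_map` aliases `num_attention_heads`/`num_hidden_layers`. A quick
# check of that behaviour with the default hyperparameters:
def _example_swin_config():
    config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.hidden_size == 96 * 2**3  # 768: embed_dim doubles at each of the 3 downsampling stages
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
    assert config.num_hidden_layers == config.num_layers  # resolved through attribute_map
    return config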


# --- diffusers script: convert consistency-model checkpoints (convert_consistency_to_diffusers.py) ---
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}


def str2bool(v):
    # parse boolean-like command-line strings
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
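

# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Invocation pattern implied by the argument parser above; the checkpoint file
# name (here a hypothetical "cd_imagenet64_l2.pt") must contain the substrings
# the script switches on ("test"/"imagenet64"/"256"+"bedroom"/"cat" for the
# U-Net, "cd"/"ct" for the scheduler):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2 --class_cond True
#
# The dumped pipeline can then, presumably, be reloaded and sampled from:
def _example_sample_converted_model(dump_path="./cd_imagenet64_l2"):
    import torch
    from diffusers import ConsistencyModelPipeline

    pipe = ConsistencyModelPipeline.from_pretrained(dump_path)
    # single-step ("consistency distillation") sampling
    image = pipe(num_inference_steps=1, generator=torch.manual_seed(0)).images[0]
    return image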


# --- diffusers tests: EulerDiscreteScheduler (test_scheduler_euler.py) ---

import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
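

# --- Hedged usage sketch (added for illustration; not part of the original test file) ---
# Every full-loop test above repeats the same denoising pattern. The
# scheduler-facing part, isolated with a stand-in `model` callable (anything
# with the (sample, t) -> tensor signature the dummy model provides):
def _example_euler_loop(model, sample, num_inference_steps=10):
    import torch
    from diffusers import EulerDiscreteScheduler

    scheduler = EulerDiscreteScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(num_inference_steps)
    generator = torch.manual_seed(0)

    sample = sample * scheduler.init_noise_sigma  # scale initial noise up to sigma_max
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = model(model_input, t)
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample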


# --- transformers tests: TFLayoutLM (test_modeling_tf_layoutlm.py) ---
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)


class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass


def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])

        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
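

# --- Hedged usage sketch (added for illustration; not part of the original test file) ---
# prepare_layoutlm_batch_inputs() hardcodes bounding boxes on LayoutLM's 0-1000
# coordinate grid. For real documents, pixel-space boxes have to be normalized
# the same way before being fed to the model; a common helper for that:
def _example_normalize_bbox(box, page_width, page_height):
    # box = (x0, y0, x1, y1) in absolute pixel coordinates
    return [
        int(1000 * box[0] / page_width),
        int(1000 * box[1] / page_height),
        int(1000 * box[2] / page_width),
        int(1000 * box[3] / page_height),
    ]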


# --- transformers tokenizer: MGP-STR (tokenization_mgp_str.py) ---

import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string by splitting it into individual characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
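

# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# MgpstrTokenizer is character-level: _tokenize splits the string into single
# characters and looks each up in vocab.json. With a toy vocabulary (the real
# checkpoint ships its own vocab; exact __call__ behaviour may vary slightly
# across transformers versions):
def _example_mgp_str_tokenizer(tmp_dir="/tmp/mgp_vocab"):
    import json
    import os

    os.makedirs(tmp_dir, exist_ok=True)
    vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
    vocab_path = os.path.join(tmp_dir, "vocab.json")
    with open(vocab_path, "w", encoding="utf-8") as f:
        json.dump(vocab, f)

    tokenizer = MgpstrTokenizer(vocab_file=vocab_path)
    ids = tokenizer("abc")["input_ids"]  # expected [2, 3, 4]; unknown chars fall back to [GO]
    return ids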


# --- diffusers pipeline: DiT (pipeline_dit.py) ---
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map label strings from ImageNet to the corresponding class ids."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple = 4.0 , _UpperCamelCase : int = None , _UpperCamelCase : List[str] = 50 , _UpperCamelCase : Any = "pil" , _UpperCamelCase : Dict = True , ) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = len(snake_case__)
_lowerCamelCase : Any = self.transformer.config.sample_size
_lowerCamelCase : List[Any] = self.transformer.config.in_channels
_lowerCamelCase : List[Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=snake_case__ , device=self.device , dtype=self.transformer.dtype , )
_lowerCamelCase : str = torch.cat([latents] * 2) if guidance_scale > 1 else latents
_lowerCamelCase : List[str] = torch.tensor(snake_case__ , device=self.device).reshape(-1)
_lowerCamelCase : Optional[Any] = torch.tensor([1000] * batch_size , device=self.device)
_lowerCamelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(snake_case__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
_lowerCamelCase : Optional[Any] = latent_model_input[: len(snake_case__) // 2]
_lowerCamelCase : Tuple = torch.cat([half, half] , dim=0)
_lowerCamelCase : Optional[int] = self.scheduler.scale_model_input(snake_case__ , snake_case__)
_lowerCamelCase : Union[str, Any] = t
if not torch.is_tensor(snake_case__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_lowerCamelCase : Any = latent_model_input.device.type == "mps"
if isinstance(snake_case__ , snake_case__):
_lowerCamelCase : Tuple = torch.floataa if is_mps else torch.floataa
else:
_lowerCamelCase : Tuple = torch.intaa if is_mps else torch.intaa
_lowerCamelCase : List[Any] = torch.tensor([timesteps] , dtype=snake_case__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
_lowerCamelCase : Dict = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCamelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
_lowerCamelCase : Optional[Any] = self.transformer(
snake_case__ , timestep=snake_case__ , class_labels=snake_case__).sample
# perform guidance
if guidance_scale > 1:
_lowerCamelCase : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_lowerCamelCase : Dict = torch.split(snake_case__ , len(snake_case__) // 2 , dim=0)
_lowerCamelCase : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_lowerCamelCase : Tuple = torch.cat([half_eps, half_eps] , dim=0)
_lowerCamelCase : Optional[int] = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_lowerCamelCase , _lowerCamelCase : Dict = torch.split(snake_case__ , snake_case__ , dim=1)
else:
_lowerCamelCase : str = noise_pred
# compute previous image: x_t -> x_t-1
_lowerCamelCase : Any = self.scheduler.step(snake_case__ , snake_case__ , snake_case__).prev_sample
if guidance_scale > 1:
_lowerCamelCase , _lowerCamelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
_lowerCamelCase : List[str] = latent_model_input
_lowerCamelCase : Any = 1 / self.vae.config.scaling_factor * latents
_lowerCamelCase : Dict = self.vae.decode(snake_case__).sample
_lowerCamelCase : Dict = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCamelCase : Tuple = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
_lowerCamelCase : Tuple = self.numpy_to_pil(snake_case__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=snake_case__)
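# Usage sketch for the pipeline above (hedged: the obfuscated names correspond
# to diffusers' DiTPipeline, so the checkpoint and method names below are assumptions):
# import torch
# from diffusers import DiTPipeline
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
# class_ids = pipe.get_label_ids(["white shark"])   # uses the label -> id map built in __init__
# image = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images[0]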
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""c"""])
self.assertEqual(_UpperCamelCase , [2])
# Out indices set to match out features
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features set to match out indices
_lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features selected from negative indices
_lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [-3, -1])
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""simple docstring"""
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
# Out features must be a list
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
# Out features must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
# Out indices must be a list or tuple
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
# Out features and out indices must be the same length
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
# Out features should match out indices
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
# Out features and out indices should be in order
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : int = BackboneMixin()
_lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
_lowerCamelCase : Tuple = ["""a""", """c"""]
_lowerCamelCase : List[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
_lowerCamelCase : str = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""])
self.assertEqual(backbone.out_indices , [0, 1])
_lowerCamelCase : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [-3, -1])
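# The checks above pin down the alignment contract; in short (assuming the
# real transformers signature), passing None for both arguments selects the
# last stage:
# out_features, out_indices = get_aligned_output_features_output_indices(None, None, ["a", "b", "c"])
# assert out_features == ["c"] and out_indices == [2]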
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCAmelCase : int =TypeVar("_T")
class __snake_case ( Generic[_T] ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : List[Any] = None) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = list(iterable or [])  # self._stacka, the inbox stack
_lowerCamelCase : str = []  # self._stackb, the outbox stack
def __len__( self : Dict) ->Union[str, Any]:
"""simple docstring"""
return len(self._stacka) + len(self._stackb)
def __repr__( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
return F"""Queue({tuple(self._stacka[::-1] + self._stacka)})"""
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : int) ->Tuple:
"""simple docstring"""
self._stacka.append(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self._stacka.pop  # pop from the inbox stack
_lowerCamelCase : List[str] = self._stackb.append  # push onto the outbox stack
if not self._stackb:
while self._stacka:
stackb_append(stacka_pop())
if not self._stackb:
raise IndexError("""Queue is empty""")
return self._stackb.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
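# Usage sketch for the two-stack queue above (hedged: assuming the original
# class was named QueueByTwoStacks with put/get methods):
# q = QueueByTwoStacks([1, 2, 3])
# q.put(4)             # O(1) push onto the inbox stack
# assert q.get() == 1  # first get() drains the inbox into the outbox, then pops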
import math
def A__ ( __A ):
'''simple docstring'''
assert isinstance(__A , __A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
_lowerCamelCase : List[Any] = range(3 , int(math.sqrt(__A ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def A__ ( __A , __A=1 , **__A ):
'''simple docstring'''
_lowerCamelCase : Dict = factor * value
_lowerCamelCase : str = value
while not is_prime(__A ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **__A )
return value
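# Worked examples (hedged: the two helpers above were presumably named
# is_prime and next_prime before obfuscation):
# assert is_prime(13) and not is_prime(15)
# assert next_prime(14) == 17   # 14, 15, 16 are composite; 17 is the next prime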
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __snake_case ( _snake_case ):
'''simple docstring'''
_snake_case = """pix2struct_text_model"""
_snake_case = ["""past_key_values"""]
_snake_case = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , _UpperCamelCase : List[str]=5_0244 , _UpperCamelCase : str=768 , _UpperCamelCase : Union[str, Any]=64 , _UpperCamelCase : Optional[int]=2048 , _UpperCamelCase : Optional[int]=12 , _UpperCamelCase : Dict=12 , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : Dict=128 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Any=1E-6 , _UpperCamelCase : List[Any]=1.0 , _UpperCamelCase : Any="gelu_new" , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=False , _UpperCamelCase : Tuple=0 , _UpperCamelCase : Tuple=1 , _UpperCamelCase : List[str]=False , _UpperCamelCase : List[str]=True , **_UpperCamelCase : str , ) ->str:
"""simple docstring"""
_lowerCamelCase : int = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : int = d_kv
_lowerCamelCase : Any = d_ff
_lowerCamelCase : int = num_layers
_lowerCamelCase : List[str] = num_heads
_lowerCamelCase : Any = relative_attention_num_buckets
_lowerCamelCase : Union[str, Any] = relative_attention_max_distance
_lowerCamelCase : str = dropout_rate
_lowerCamelCase : str = layer_norm_epsilon
_lowerCamelCase : Optional[Any] = initializer_factor
_lowerCamelCase : Any = use_cache
_lowerCamelCase : Dict = eos_token_id
_lowerCamelCase : Dict = decoder_start_token_id
# for backwards compatibility
_lowerCamelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , tie_word_embeddings=snake_case_ , is_decoder=snake_case_ , **snake_case_ , )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , _UpperCamelCase : Any , **_UpperCamelCase : Optional[int]) ->Tuple:
"""simple docstring"""
cls._set_token_in_kwargs(snake_case_)
_lowerCamelCase , _lowerCamelCase : Dict = cls.get_config_dict(snake_case_ , **snake_case_)
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""") == "pix2struct":
_lowerCamelCase : int = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(snake_case_ , **snake_case_)
class __snake_case ( _snake_case ):
'''simple docstring'''
_snake_case = """pix2struct_vision_model"""
def __init__( self : Optional[int] , _UpperCamelCase : List[Any]=768 , _UpperCamelCase : str=768 , _UpperCamelCase : Optional[Any]=2048 , _UpperCamelCase : Union[str, Any]=64 , _UpperCamelCase : Optional[int]=12 , _UpperCamelCase : Tuple=12 , _UpperCamelCase : List[str]="gelu_new" , _UpperCamelCase : str=1E-6 , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : List[Any]=1E-1_0 , _UpperCamelCase : Any=1.0 , _UpperCamelCase : str=4096 , _UpperCamelCase : Optional[int]=32 , _UpperCamelCase : int=128 , **_UpperCamelCase : Any , ) ->Dict:
"""simple docstring"""
super().__init__(**snake_case_)
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Dict = patch_embed_hidden_size
_lowerCamelCase : Tuple = d_ff
_lowerCamelCase : Tuple = dropout_rate
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : List[str] = initializer_factor
_lowerCamelCase : Tuple = attention_dropout
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = dense_act_fn
_lowerCamelCase : int = seq_len
_lowerCamelCase : Optional[Any] = relative_attention_num_buckets
_lowerCamelCase : str = relative_attention_max_distance
_lowerCamelCase : List[Any] = d_kv
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , _UpperCamelCase : Tuple , **_UpperCamelCase : str) ->int:
"""simple docstring"""
cls._set_token_in_kwargs(snake_case_)
_lowerCamelCase , _lowerCamelCase : str = cls.get_config_dict(snake_case_ , **snake_case_)
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""") == "pix2struct":
_lowerCamelCase : Optional[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(snake_case_ , **snake_case_)
class __snake_case ( _snake_case ):
'''simple docstring'''
_snake_case = """pix2struct"""
_snake_case = True
def __init__( self : List[Any] , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Dict=1.0 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Optional[int]=True , **_UpperCamelCase : Optional[Any] , ) ->int:
"""simple docstring"""
super().__init__(tie_word_embeddings=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_)
if text_config is None:
_lowerCamelCase : List[str] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""")
if vision_config is None:
_lowerCamelCase : Optional[Any] = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""")
_lowerCamelCase : Tuple = PixaStructTextConfig(**snake_case_)
_lowerCamelCase : Optional[int] = PixaStructVisionConfig(**snake_case_)
_lowerCamelCase : List[str] = self.text_config.decoder_start_token_id
_lowerCamelCase : List[str] = self.text_config.pad_token_id
_lowerCamelCase : Dict = self.text_config.eos_token_id
_lowerCamelCase : Union[str, Any] = initializer_factor
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Any = self.initializer_range
_lowerCamelCase : Optional[int] = self.initializer_range
_lowerCamelCase : List[str] = is_vqa
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , **_UpperCamelCase : Optional[Any]) ->Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Any:
"""simple docstring"""
_lowerCamelCase : List[str] = copy.deepcopy(self.__dict__)
_lowerCamelCase : str = self.text_config.to_dict()
_lowerCamelCase : Optional[int] = self.vision_config.to_dict()
_lowerCamelCase : Optional[int] = self.__class__.model_type
return output
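# Composition sketch (hedged: assuming the usual transformers naming,
# i.e. Pix2StructTextConfig / Pix2StructVisionConfig / Pix2StructConfig):
# cfg = Pix2StructConfig.from_text_vision_configs(
#     Pix2StructTextConfig(num_layers=6), Pix2StructVisionConfig(num_hidden_layers=6)
# )
# cfg.to_dict()  # nests "text_config" and "vision_config" plus the "model_type" key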
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple:
"""simple docstring"""
super().__init__(*_UpperCamelCase , **_UpperCamelCase)
requires_backends(self , """vision""")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {}
if top_k is not None:
_lowerCamelCase : str = top_k
return {}, {}, postprocess_params
def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict:
"""simple docstring"""
return super().__call__(_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Tuple = load_image(_UpperCamelCase)
_lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Any = self.model(**_UpperCamelCase)
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str:
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowerCamelCase : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
_lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0]
_lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase)
elif self.framework == "tf":
_lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0]
_lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase)
_lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
_lowerCamelCase : str = scores.tolist()
_lowerCamelCase : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : str=13 , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Any=99 , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : List[str]=5 , _UpperCamelCase : Any=4 , _UpperCamelCase : List[Any]=37 , _UpperCamelCase : Optional[Any]="gelu" , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : List[Any]=512 , _UpperCamelCase : Optional[Any]=16 , _UpperCamelCase : int=2 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : Any=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Optional[Any]=None , ) ->Any:
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : List[Any] = use_input_mask
_lowerCamelCase : Union[str, Any] = use_token_type_ids
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Dict = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_size
_lowerCamelCase : Dict = type_sequence_label_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : str = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : Dict = scope
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
"""simple docstring"""
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowerCamelCase : List[Any] = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length])
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_lowerCamelCase : Dict = None
_lowerCamelCase : List[Any] = None
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices)
_lowerCamelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = NystromformerModel(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__)
_lowerCamelCase : Any = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__)
_lowerCamelCase : Any = model(UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = NystromformerForMaskedLM(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : Tuple = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : List[Any]) ->str:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = NystromformerForQuestionAnswering(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : int = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.num_labels
_lowerCamelCase : Any = NystromformerForSequenceClassification(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : Optional[Any] = NystromformerForTokenClassification(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Any = self.num_choices
_lowerCamelCase : List[str] = NystromformerForMultipleChoice(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : int = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : Union[str, Any] = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ,
) : Union[str, Any] = config_and_inputs
_lowerCamelCase : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_snake_case = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : int) ->str:
"""simple docstring"""
_lowerCamelCase : List[Any] = NystromformerModelTester(self)
_lowerCamelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : Dict = type
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : List[Any] = NystromformerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Dict = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""")
_lowerCamelCase : Any = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
_lowerCamelCase : Tuple = model(UpperCAmelCase__)[0]
_lowerCamelCase : int = torch.Size((1, 6, 768))
self.assertEqual(output.shape , UpperCAmelCase__)
_lowerCamelCase : Dict = torch.tensor(
[[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = """the [MASK] of Belgium is Brussels"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""")
_lowerCamelCase : Optional[int] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""")
_lowerCamelCase : List[Any] = tokenizer(UpperCAmelCase__ , return_tensors="""pt""")
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(encoding.input_ids).logits
_lowerCamelCase : Tuple = token_logits[:, 2, :].argmax(-1)[0]
self.assertEqual(tokenizer.decode(UpperCAmelCase__) , """capital""")
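# The masked-LM integration test above doubles as a recipe; the equivalent
# inference sketch is:
# tok = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
# mlm = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
# logits = mlm(tok("the [MASK] of Belgium is Brussels", return_tensors="pt").input_ids).logits
# tok.decode(logits[0, 2].argmax(-1))  # -> "capital"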
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class __snake_case ( __UpperCAmelCase ):
'''simple docstring'''
_snake_case = "xglm"
_snake_case = ["past_key_values"]
_snake_case = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , _UpperCamelCase : List[str]=25_6008 , _UpperCamelCase : Any=2048 , _UpperCamelCase : List[str]=1024 , _UpperCamelCase : List[str]=4096 , _UpperCamelCase : List[str]=24 , _UpperCamelCase : List[Any]=16 , _UpperCamelCase : int="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Union[str, Any]=0.0_2 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : int=True , _UpperCamelCase : List[str]=2 , _UpperCamelCase : List[str]=1 , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : int=2 , **_UpperCamelCase : Tuple , ) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = d_model
_lowerCamelCase : Any = ffn_dim
_lowerCamelCase : Optional[Any] = num_layers
_lowerCamelCase : List[Any] = attention_heads
_lowerCamelCase : Optional[Any] = activation_function
_lowerCamelCase : Dict = dropout
_lowerCamelCase : int = attention_dropout
_lowerCamelCase : List[str] = activation_dropout
_lowerCamelCase : Optional[int] = layerdrop
_lowerCamelCase : List[str] = init_std
_lowerCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase : Union[str, Any] = use_cache
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
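# With no arguments the config above reproduces facebook/xglm-564M's
# hyperparameters, and the attribute_map aliases resolve as shown:
# cfg = XGLMConfig()  # assuming the original class name
# cfg.hidden_size, cfg.num_hidden_layers, cfg.num_attention_heads  # -> (1024, 24, 16)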
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__A , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__A )
return parser.parse_args()
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = parse_args()
# Import training_script as a module.
_lowerCamelCase : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCamelCase : Optional[Any] = script_fpath.stem
_lowerCamelCase : Dict = importlib.import_module(__A )
# Patch sys.argv
_lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
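# Invocation sketch (hedged: this mirrors the documented xla_spawn-style CLI,
# where everything after the script path is forwarded to the training script):
#   python xla_spawn.py --num_cores 8 my_training_script.py --per_device_train_batch_size 8
# The launcher itself appends "--tpu_num_cores 8" to sys.argv before calling xmp.spawn.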
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowerCAmelCase : Optional[Any] ={
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class __snake_case ( lowerCamelCase__ ):
'''simple docstring'''
_snake_case : Dict = 'facebook/nllb-200-distilled-600M'
_snake_case : int = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
_snake_case : str = 'translator'
_snake_case : List[Any] = AutoTokenizer
_snake_case : Optional[Any] = AutoModelForSeqaSeqLM
_snake_case : Any = LANGUAGE_CODES
_snake_case : Optional[int] = ['text', 'text', 'text']
_snake_case : List[Any] = ['text']
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : List[Any]) ->int:
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(F"""{src_lang} is not a supported language.""")
if tgt_lang not in self.lang_to_code:
raise ValueError(F"""{tgt_lang} is not a supported language.""")
_lowerCamelCase : List[str] = self.lang_to_code[src_lang]
_lowerCamelCase : str = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__lowerCamelCase , return_tensors="""pt""" , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Optional[Any]) ->int:
"""simple docstring"""
return self.model.generate(**__lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Dict) ->Any:
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowerCamelCase)
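# Usage sketch (hedged: assuming the agents Tool protocol of
# encode -> forward -> decode that the three methods above implement,
# dispatched through the base class's __call__):
# tool = TranslationTool()  # assuming the original class name
# tool("Bonjour le monde", src_lang="French", tgt_lang="English")  # -> "Hello world"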
def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
if n == 0:
return 0
_lowerCamelCase : Tuple = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max(
__A , prices[i - 1] + naive_cut_rod_recursive(n - i , __A ) )
return max_revue
def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
_lowerCamelCase : Optional[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__A , __A , __A )
def A__ ( __A , __A , __A ):
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_lowerCamelCase : int = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Optional[Any] = max(
__A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __A , __A ) , )
_lowerCamelCase : Optional[Any] = max_revenue
return max_rev[n]
def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
_lowerCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
_lowerCamelCase : Any = 0
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max_rev[i]
for j in range(1 , i + 1 ):
_lowerCamelCase : List[Any] = max(__A , prices[j - 1] + max_rev[i - j] )
_lowerCamelCase : int = max_revenue_i
return max_rev[n]
def A__ ( __A , __A ):
'''simple docstring'''
if n < 0:
_lowerCamelCase : Any = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(__A )
if n > len(__A ):
_lowerCamelCase : List[Any] = (
"""Each integral piece of rod must have a corresponding price. """
F"""Got n = {n} but length of prices = {len(__A )}"""
)
raise ValueError(__A )
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : str = [6, 10, 12, 15, 20, 23]
_lowerCamelCase : List[str] = len(__A )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_lowerCamelCase : Tuple = 36
_lowerCamelCase : Any = top_down_cut_rod(__A , __A )
_lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A )
_lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
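# Cross-check against the classic CLRS instance (hedged function names): for
# prices = [1, 5, 8, 9, 10, 17, 17, 20], the optimal revenue for a rod of
# length 8 is 22 (one piece of length 2 plus one of length 6).
# assert bottom_up_cut_rod(8, [1, 5, 8, 9, 10, 17, 17, 20]) == 22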
'''simple docstring'''
def A__ ( ):
'''simple docstring'''
return 1
def A__ ( __A ):
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def A__ ( __A ):
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase__ )
def A__ ( __A ):
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase__ )
def A__ ( __A ):
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase__ )
def A__ ( __A ):
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase__ )
def A__ ( __A ):
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase__ )
def A__ ( __A ):
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase__ )
def A__ ( __A = 200 ):
'''simple docstring'''
return two_pound(UpperCamelCase__ )
if __name__ == "__main__":
print(solution(int(input().strip())))
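# The chain above counts coin combinations by recursing on ever-smaller
# denominations; solution(200) yields Project Euler 31's answer:
# assert solution(200) == 73682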
from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Any = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase : List[str] =logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format=None):
        """simple docstring"""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """simple docstring"""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
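# The pad arithmetic above always rounds up to the next multiple of size, even when a side is
# already a multiple (a 256-pixel side with size 8 still gains 8 extra rows). A worked example,
# assuming the names restored above:
#     old_height, old_width = 250, 300 and size = 8:
#     pad_height = (250 // 8 + 1) * 8 - 250  ->  6
#     pad_width  = (300 // 8 + 1) * 8 - 300  ->  4
#     the image is padded symmetrically on the bottom/right edges to 256 x 304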
| 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    '''simple docstring'''

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
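# A minimal usage sketch for the reader above ("train.txt" is a hypothetical local file;
# the resulting dataset has a single "text" column with one row per line):
# ds = TextDatasetReader("train.txt", split="train").read()
# print(ds[0])  # {'text': '...first line of train.txt...'}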
| 15 | 0 |
from torch import nn
class ClassificationHead(nn.Module):
    '''simple docstring'''

    def __init__(self, class_size: int, embed_size: int) -> None:
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        """simple docstring"""
        logits = self.mlp(hidden_state)
        return logits
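# A quick smoke test of the head above (the sizes are arbitrary):
# import torch
# head = ClassificationHead(class_size=5, embed_size=768)
# logits = head(torch.randn(2, 768))
# print(logits.shape)  # torch.Size([2, 5])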
| 704 | red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    '''
    >>> dutch_national_flag_sort([2, 0, 1, 2, 0, 1])
    [0, 0, 1, 1, 2, 2]
    '''
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 0 |
from __future__ import annotations
def min_path_sum(matrix: list) -> int:
    '''
    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    '''
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 | from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    '''simple docstring'''
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    '''simple docstring'''
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    '''simple docstring'''
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    '''simple docstring'''
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    '''simple docstring'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    '''simple docstring'''
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
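# A minimal sketch of driving the helpers above (the feature spec and the temporary path
# are illustrative):
# import os
# import tempfile
# features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
# with tempfile.TemporaryDirectory() as tmp_dir:
#     dataset = generate_example_dataset(os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=10)
#     print(len(dataset))  # 10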
| 706 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    '''simple docstring'''
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowerCAmelCase : Union[str, Any] =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
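    # For reference, a sketch of how the converter might be invoked from the command line
    # (the script name and paths are hypothetical):
    # python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./xlnet_model.ckpt \
    #     --xlnet_config_file ./xlnet_config.json \
    #     --pytorch_dump_folder_path ./xlnet_pytorch \
    #     --finetuning_task sts-b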
| 15 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250_004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
lowerCAmelCase : Tuple =parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
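    # Likewise, a hedged sketch of invoking this converter (script name and file paths
    # are illustrative, not from the original):
    # python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
    #     --checkpoint_path ./checkpoint_best.pt \
    #     --dict_path ./dict.mbart50.txt \
    #     --config_yaml_path ./config.yaml \
    #     --pytorch_dump_folder_path ./wav2vec2-mbart50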
| 707 | def all_unique_chars(input_str: str) -> bool:
    '''
    >>> all_unique_chars("abcde")
    True
    >>> all_unique_chars("abcda")
    False
    '''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
import math
def sieve(n):
    '''
    >>> sieve(8)
    [2, 3, 5, 7]
    '''
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
| 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_xlm_roberta_base(self):
        """simple docstring"""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        """simple docstring"""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 15 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
'''simple docstring'''
    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __snake_case ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        """simple docstring"""
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        """simple docstring"""
        pass

    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 709 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class __snake_case ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256}
_lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = do_resize
_lowerCamelCase : int = size
_lowerCamelCase : Optional[int] = resample
_lowerCamelCase : int = do_center_crop
_lowerCamelCase : Optional[Any] = crop_size
_lowerCamelCase : Union[str, Any] = do_rescale
_lowerCamelCase : List[str] = rescale_factor
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return resize(
_UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str:
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image:
"""simple docstring"""
_lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[str] = resample if resample is not None else self.resample
_lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : Optional[Any] = size if size is not None else self.size
_lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = make_list_of_images(_UpperCamelCase)
if not valid_images(_UpperCamelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images]
if do_resize:
_lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images]
if do_center_crop:
_lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images]
if do_rescale:
_lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images]
if do_normalize:
_lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images]
_lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images]
_lowerCamelCase : int = {"""pixel_values""": images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
| 15 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[str] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    '''simple docstring'''

    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
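# A quick instantiation sketch for the config above:
# config = Swinv2Config(image_size=256, window_size=8)
# print(config.num_layers)   # 4, i.e. len of the default depths
# print(config.hidden_size)  # 768, i.e. 96 * 2 ** 3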
| 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    '''simple docstring'''

    def process(self, sample: float) -> float:
        """simple docstring"""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
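# To drive the two plots, any object with a matching process method will do. A minimal
# illustrative filter (the moving-average design is an assumption for demonstration, not
# part of the original file):
#
# class MovingAverage:
#     '''Simple FIR low-pass: the mean of the last `taps` samples.'''
#
#     def __init__(self, taps: int = 5) -> None:
#         self.buffer = [0.0] * taps
#
#     def process(self, sample: float) -> float:
#         self.buffer = self.buffer[1:] + [sample]
#         return sum(self.buffer) / len(self.buffer)
#
# show_frequency_response(MovingAverage(), 48000)
# show_phase_response(MovingAverage(), 48000)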
| 15 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = EsmModelTester(self)
_lowerCamelCase : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Dict) ->int:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : str = EsmModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]:
"""simple docstring"""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx)
        self.assertEqual(position_ids.shape , expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions)))
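    # Illustrative trace of the check above (padding_idx = 1, matching pad_token_id=1 in get_config):
    #   input_ids    = [[12, 31, 13, 1]]
    #   position_ids = [[ 2,  3,  4, 1]]  # non-pad tokens count up from padding_idx + 1; pads keep padding_idx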
def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
"""simple docstring"""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2 , 4 , 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape , expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions)))
@unittest.skip("""Esm does not support embedding resizing""")
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
"""simple docstring"""
pass
@require_torch
class __snake_case ( __UpperCAmelCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : int = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
model.eval()
_lowerCamelCase : Dict = torch.tensor([[0, 1, 2, 3, 4, 5]])
_lowerCamelCase : Union[str, Any] = model(lowerCAmelCase_)[0]
_lowerCamelCase : str = 33
_lowerCamelCase : Optional[Any] = torch.Size((1, 6, vocab_size))
self.assertEqual(output.shape , lowerCAmelCase_)
        _lowerCamelCase : Any = torch.tensor(
            [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : List[str] = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
model.eval()
_lowerCamelCase : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
_lowerCamelCase : Optional[Any] = model(lowerCAmelCase_)[0]
# compare the actual values for a slice.
_lowerCamelCase : str = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4))
| 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path , output_path , opset , fp16 = False ):
    '''simple docstring'''
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        device = """cpu"""
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + """/vae""" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
        """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
    } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 15 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=13 , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Dict=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=99 , _UpperCamelCase : Optional[int]=32 , _UpperCamelCase : List[Any]=5 , _UpperCamelCase : Dict=4 , _UpperCamelCase : List[str]=37 , _UpperCamelCase : int="gelu" , _UpperCamelCase : str=0.1 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : int=512 , _UpperCamelCase : int=16 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : List[str]=0.0_2 , _UpperCamelCase : Any=4 , ) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : Dict = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Optional[int] = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Optional[Any] = num_choices
def _SCREAMING_SNAKE_CASE ( self : str) ->int:
"""simple docstring"""
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowerCamelCase : List[str] = None
if self.use_attention_mask:
_lowerCamelCase : str = random_attention_mask([self.batch_size, self.seq_length])
_lowerCamelCase : str = None
if self.use_token_type_ids:
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_lowerCamelCase : int = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _SCREAMING_SNAKE_CASE ( self : Any) ->Any:
"""simple docstring"""
        config, input_ids, token_type_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Any:
"""simple docstring"""
        config, input_ids, token_type_ids, attention_mask = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __snake_case ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = FlaxRobertaPreLayerNormModelTester(self)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
"""simple docstring"""
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape) , expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
"""simple docstring"""
        model = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
| 712 | from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc , acceptor_conc , intrinsic_conc , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
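    # A minimal usage sketch (assumed: concentrations in cm^-3 for a silicon pn junction
    # at the 300 K set above; the numbers are illustrative, not from the original file):
    v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e16, intrinsic_conc=1.5e10)
    print(f"Built-in potential: {v_bi:.3f} V")  # on the order of 0.75 V for these values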
| 15 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments :
    '''simple docstring'''
    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'} , )
    plot_along_batch: bool = field(
        default=False , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
    is_time: bool = field(
        default=False , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
    no_log_scale: bool = field(
        default=False , metadata={'help': 'Disable logarithmic scale when plotting'} , )
    is_train: bool = field(
        default=False , metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int( value ):
    '''simple docstring'''
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float( value ):
    '''simple docstring'''
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot :
    '''simple docstring'''
    def __init__( self : Optional[Any] , args : Optional[Any]) ->Tuple:
        """simple docstring"""
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file , newline="""""") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["""model"""]
                self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""]))
                self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""]))
                if can_convert_to_int(row["""result"""]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["""batch_size"""]), int(row["""sequence_length"""]))] = int(row["""result"""])
                elif can_convert_to_float(row["""result"""]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["""batch_size"""]), int(row["""sequence_length"""]))] = float(row["""result"""])
    def plot( self : Dict) ->Dict:
        """simple docstring"""
        fig, ax = plt.subplots()
        title_str = """Time usage""" if self.args.is_time else """Memory usage"""
        title_str = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("""log""")
            ax.set_yscale("""log""")
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["""bsz"""]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["""seq_len"""]))
            results = self.result_dict[model_name]["""result"""]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
                )
                x_axis_array_trimmed = np.asarray(x_axis_array , int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array_trimmed , y_axis_array , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""")
                plt.plot(x_axis_array_trimmed , y_axis_array , """--""")
                title_str += F""" {label_model_name} vs."""
        title_str = title_str[:-4]
        y_axis_label = """Time in s""" if self.args.is_time else """Memory in MB"""
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
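# Example usage (a sketch; filenames are illustrative). Given a CSV with the columns
# the reader above expects (model, batch_size, sequence_length, result), e.g.
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,0.021
#   bert-base-uncased,8,512,0.064
#
# a plot can be rendered and saved with:
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png --is_time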
| 713 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    '''simple docstring'''
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCAmelCase : Any ={"facebook/blenderbot-3B": 128}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
def __init__( self : str , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : List[Any]="replace" , _UpperCamelCase : int="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : Any="<s>" , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : Dict="<pad>" , _UpperCamelCase : Dict="<mask>" , _UpperCamelCase : List[str]=False , _UpperCamelCase : Union[str, Any]=True , **_UpperCamelCase : List[Any] , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_lowerCamelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , __UpperCamelCase) != add_prefix_space:
_lowerCamelCase : Tuple = getattr(__UpperCamelCase , pre_tok_state.pop("""type"""))
_lowerCamelCase : Tuple = add_prefix_space
_lowerCamelCase : str = pre_tok_class(**__UpperCamelCase)
_lowerCamelCase : str = add_prefix_space
_lowerCamelCase : List[Any] = """post_processor"""
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase)
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : int = tuple(state["""sep"""])
if "cls" in state:
_lowerCamelCase : str = tuple(state["""cls"""])
_lowerCamelCase : Any = False
if state.get("""add_prefix_space""" , __UpperCamelCase) != add_prefix_space:
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : int = True
if state.get("""trim_offsets""" , __UpperCamelCase) != trim_offsets:
_lowerCamelCase : int = trim_offsets
_lowerCamelCase : Union[str, Any] = True
if changes_to_apply:
_lowerCamelCase : List[str] = getattr(__UpperCamelCase , state.pop("""type"""))
_lowerCamelCase : Union[str, Any] = component_class(**__UpperCamelCase)
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase) if isinstance(__UpperCamelCase , __UpperCamelCase) else value
_lowerCamelCase : int = value
def _SCREAMING_SNAKE_CASE ( self : Tuple , *_UpperCamelCase : Dict , **_UpperCamelCase : Optional[int]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = kwargs.get("""is_split_into_words""" , __UpperCamelCase)
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , *_UpperCamelCase : List[str] , **_UpperCamelCase : Union[str, Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : str = kwargs.get("""is_split_into_words""" , __UpperCamelCase)
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None) ->Union[str, Any]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def _SCREAMING_SNAKE_CASE ( self : Dict , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None) ->int:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
    def _SCREAMING_SNAKE_CASE ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None) ->Optional[Any]:
        """simple docstring"""
        return token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : "Conversation") ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text)
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase)
_lowerCamelCase : Dict = """ """.join(__UpperCamelCase)
_lowerCamelCase : Union[str, Any] = self.encode(__UpperCamelCase)
if len(__UpperCamelCase) > self.model_max_length:
_lowerCamelCase : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
return input_ids
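# Sketch of the conversation formatting implemented above (illustrative):
#
#   user turn  "Hello!"    -> " Hello!"    (space-prefixed, as blenderbot expects)
#   model turn "Hi there." -> "Hi there."  (kept as-is)
#
# Turns are joined with a space and encoded; if the result exceeds model_max_length,
# only the most recent tokens are kept.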
| 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
return self._get_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
_lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def A__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 15 | 0 |
def solution( max_base = 10 , max_power = 22 ):
    '''simple docstring'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 715 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
    model_type = 'swin'
    attribute_map = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
_lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
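# A minimal usage sketch (hedged: the upstream names SwinConfig/SwinModel are assumed,
# since the classes in this dump are obfuscated to __snake_case):
#
#   from transformers import SwinConfig, SwinModel
#   config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   model = SwinModel(config)
#   # hidden_size is derived above as embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768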
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
| 15 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowerCAmelCase : Tuple ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any ={
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class __snake_case ( __UpperCAmelCase ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Optional[Any]="[SEP]" , _UpperCamelCase : Optional[int]="[PAD]" , _UpperCamelCase : Tuple="[CLS]" , _UpperCamelCase : Optional[int]="[MASK]" , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[Any]=None , **_UpperCamelCase : List[Any] , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , _UpperCamelCase) != do_lower_case
or normalizer_state.get("""strip_accents""" , _UpperCamelCase) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _UpperCamelCase) != tokenize_chinese_chars
):
_lowerCamelCase : List[Any] = getattr(_UpperCamelCase , normalizer_state.pop("""type"""))
_lowerCamelCase : List[str] = do_lower_case
_lowerCamelCase : Optional[int] = strip_accents
_lowerCamelCase : Optional[int] = tokenize_chinese_chars
_lowerCamelCase : List[str] = normalizer_class(**_UpperCamelCase)
_lowerCamelCase : int = do_lower_case
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , token_ids_a : Tuple , token_ids_b : List[Any]=None) ->Optional[int]:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None) ->Optional[Any]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def _SCREAMING_SNAKE_CASE ( self : Dict , save_directory : str , filename_prefix : Optional[str] = None) ->str:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
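# Layout produced by the two helpers above (illustrative):
#
#   single sequence: [CLS] A A A [SEP]          -> token_type_ids: 0 0 0 0 0
#   sequence pair:   [CLS] A A [SEP] B B [SEP]  -> token_type_ids: 0 0 0 0 1 1 1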
| 716 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : Any = torch.manual_seed(0)
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
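# The tests above all exercise the same denoising pattern. A minimal sketch of
# that loop, assuming a diffusers-style scheduler and a callable model(sample, t)
# (both hypothetical stand-ins, not objects defined in this file):
#
#     scheduler.set_timesteps(num_inference_steps)
#     sample = initial_noise * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)
#         model_output = model(scaled, t)
#         sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample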
| 15 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    """Raise a helpful error if the installed torch is too old for this processor."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a CHW image tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render `text` onto a new PIL image, wrapping long lines at 80 characters."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` as text and paste it above `image`."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Pix2Struct image processor that converts images into flattened patch sequences.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize the image to a near-optimal grid and flatten it into at most `max_patches` patches."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the patch grid fits within max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize with the image's own mean and a lower-bounded standard deviation."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks},
            tensor_type=return_tensors,
        )

        return encoded_outputs
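# A minimal usage sketch for the processor above. The image construction is an
# illustrative assumption (any RGB PIL image works); with the default 16x16
# patches, each flattened patch row is 2 + 16 * 16 * 3 = 770 values.
#
#     from PIL import Image
#     processor = Pix2StructImageProcessor(max_patches=1024)
#     image = Image.new("RGB", (640, 480), "white")
#     encoding = processor.preprocess(image, return_tensors="np")
#     print(encoding["flattened_patches"].shape)  # (1, 1024, 770)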
| 717 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, backed by a simple JSON vocab."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # every character is its own token
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
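# A minimal usage sketch, assuming a vocab.json mapping single characters to ids
# (the path below is a hypothetical placeholder):
#
#     tokenizer = MgpstrTokenizer(vocab_file="path/to/vocab.json")
#     tokens = tokenizer._tokenize("hello")  # ['h', 'e', 'l', 'l', 'o']
#     ids = [tokenizer._convert_token_to_id(t) for t in tokens]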
| 15 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : str , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : str=30 , _UpperCamelCase : Any=400 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Dict=0.9 , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Dict=[0.5, 0.5, 0.5] , _UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , ) ->str:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = size if size is not None else {'''shortest_edge''': 30}
_lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
_lowerCamelCase : List[str] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : Any = min_resolution
_lowerCamelCase : Optional[int] = max_resolution
_lowerCamelCase : Dict = do_resize_and_center_crop
_lowerCamelCase : Optional[Any] = size
_lowerCamelCase : List[Any] = crop_pct
_lowerCamelCase : List[str] = crop_size
_lowerCamelCase : List[str] = do_normalize
_lowerCamelCase : Dict = image_mean
_lowerCamelCase : Tuple = image_std
def _SCREAMING_SNAKE_CASE ( self : Any) ->Any:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __snake_case ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
_snake_case = PoolFormerImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
_lowerCamelCase : Any = PoolFormerImageProcessingTester(self)
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCamelCase , """do_resize_and_center_crop"""))
self.assertTrue(hasattr(_UpperCamelCase , """size"""))
self.assertTrue(hasattr(_UpperCamelCase , """crop_pct"""))
self.assertTrue(hasattr(_UpperCamelCase , """do_normalize"""))
self.assertTrue(hasattr(_UpperCamelCase , """image_mean"""))
self.assertTrue(hasattr(_UpperCamelCase , """image_std"""))
def _SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
"""simple docstring"""
_lowerCamelCase : int = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""shortest_edge""": 30})
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30})
_lowerCamelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"""shortest_edge""": 42})
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84})
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image)
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCamelCase : List[Any] = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray)
# Test not batched input
_lowerCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCamelCase : int = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor)
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCamelCase : Any = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""c"""])
self.assertEqual(_UpperCamelCase , [2])
# Out indices set to match out features
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features set to match out indices
_lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features selected from negative indices
_lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [-3, -1])
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""simple docstring"""
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
# Out features must be a list
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
# Out features must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
# Out indices must be a list or tuple
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
# Out features and out indices must be the same length
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
# Out features should match out indices
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
# Out features and out indices should be in order
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : int = BackboneMixin()
_lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
_lowerCamelCase : Tuple = ["""a""", """c"""]
_lowerCamelCase : List[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
_lowerCamelCase : str = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""])
self.assertEqual(backbone.out_indices , [0, 1])
_lowerCamelCase : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase : Any =["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def A__ ( __A , __A ):
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def A__ ( __A ):
'''simple docstring'''
config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=UpperCamelCase__ )
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = tmp_path_factory.getbasetemp() / """cache"""
_lowerCamelCase : str = test_hf_cache_home / """datasets"""
_lowerCamelCase : str = test_hf_cache_home / """metrics"""
_lowerCamelCase : Optional[int] = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(UpperCamelCase__ ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(UpperCamelCase__ ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(UpperCamelCase__ ) )
_lowerCamelCase : int = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(UpperCamelCase__ ) )
_lowerCamelCase : List[str] = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(UpperCamelCase__ ) )
@pytest.fixture(autouse=UpperCamelCase__ , scope="""session""" )
def A__ ( ):
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=UpperCamelCase__ )
def A__ ( __A ):
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , UpperCamelCase__ )
@pytest.fixture
def A__ ( __A ):
'''simple docstring'''
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , UpperCamelCase__ )
| 719 | import math
def is_prime(number: int) -> bool:
    """Trial division by odd numbers up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime after `factor * value`, searching downward if kwargs["desc"] is True."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
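# A quick usage sketch (values easy to verify by hand):
#
#     is_prime(13)    # True
#     next_prime(14)  # 17: 14, 15, 16 are composite, 17 is prime
#
# Note that next_prime skips a value that is already prime and returns the one
# after it, because of the `value == first_value_val` recursion above.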
| 15 | 0 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 720 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 15 | 0 |
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`; record even-sized subtrees as cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # an even-sized subtree can be split off by cutting the edge above `start`
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
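# Worked example for the edge list above: the subtrees rooted at 3 (size 2) and
# 6 (size 4) have even size, and the whole tree (rooted at 1, size 10) is even
# as well. The root cannot be cut off, hence `len(cuts) - 1` == 2 removable edges.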
| 721 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15 | 0 |
import argparse
import struct
import unittest
class SHA256:
    """Pure-Python SHA-256, mirroring the algorithm in FIPS 180-4."""

    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
self.hashes = [
0x6a09_e667,
0xbb67_ae85,
0x3c6e_f372,
0xa54f_f53a,
0x510e_527f,
0x9b05_688c,
0x1f83_d9ab,
0x5be0_cd19,
]
# Initialize round constants
self.round_constants = [
0x428a_2f98,
0x7137_4491,
0xb5c0_fbcf,
0xe9b5_dba5,
0x3956_c25b,
0x59f1_11f1,
0x923f_82a4,
0xab1c_5ed5,
0xd807_aa98,
0x1283_5b01,
0x2431_85be,
0x550c_7dc3,
0x72be_5d74,
0x80de_b1fe,
0x9bdc_06a7,
0xc19b_f174,
0xe49b_69c1,
0xefbe_4786,
0x0fc1_9dc6,
0x240c_a1cc,
0x2de9_2c6f,
0x4a74_84aa,
0x5cb0_a9dc,
0x76f9_88da,
0x983e_5152,
0xa831_c66d,
0xb003_27c8,
0xbf59_7fc7,
0xc6e0_0bf3,
0xd5a7_9147,
0x06ca_6351,
0x1429_2967,
0x27b7_0a85,
0x2e1b_2138,
0x4d2c_6dfc,
0x5338_0d13,
0x650a_7354,
0x766a_0abb,
0x81c2_c92e,
0x9272_2c85,
0xa2bf_e8a1,
0xa81a_664b,
0xc24b_8b70,
0xc76c_51a3,
0xd192_e819,
0xd699_0624,
0xf40e_3585,
0x106a_a070,
0x19a4_c116,
0x1e37_6c08,
0x2748_774c,
0x34b0_bcb5,
0x391c_0cb3,
0x4ed8_aa4a,
0x5b9c_ca4f,
0x682e_6ff3,
0x748f_82ee,
0x78a5_636f,
0x84c8_7814,
0x8cc7_0208,
0x90be_fffa,
0xa450_6ceb,
0xbef9_a3f7,
0xc671_78f2,
]
self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message to a multiple of 64 bytes and append its bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        """Run the SHA-256 compression function over each 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xffff_ffff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xffff_ffff & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
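# A quick correctness check against a published test vector (FIPS 180-4):
#
#     SHA256(b"abc").hash
#     # 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'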
| 700 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
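# Typical invocation (the script name and the flags after it are illustrative):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher appends --tpu_num_cores to the child script's argv, so the
# training script must accept that flag.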
| 15 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase : List[str] =["text", "image", "audio"]
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Any = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : List[str] = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class __snake_case :
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
"""simple docstring"""
self.assertTrue(hasattr(self.tool , """inputs"""))
self.assertTrue(hasattr(self.tool , """outputs"""))
_lowerCamelCase : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input , _UpperCamelCase):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
_lowerCamelCase : Union[str, Any] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def _SCREAMING_SNAKE_CASE ( self : str) ->Dict:
"""simple docstring"""
_lowerCamelCase : Tuple = create_inputs(self.tool.inputs)
_lowerCamelCase : Optional[int] = self.tool(*_UpperCamelCase)
# There is a single output
if len(self.tool.outputs) == 1:
_lowerCamelCase : Optional[Any] = [outputs]
self.assertListEqual(output_types(_UpperCamelCase) , self.tool.outputs)
def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
"""simple docstring"""
self.assertTrue(hasattr(self.tool , """description"""))
self.assertTrue(hasattr(self.tool , """default_checkpoint"""))
self.assertTrue(self.tool.description.startswith("""This is a tool that"""))
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : str = create_inputs(self.tool.inputs)
_lowerCamelCase : Union[str, Any] = self.tool(*_UpperCamelCase)
if not isinstance(_UpperCamelCase , _UpperCamelCase):
_lowerCamelCase : Union[str, Any] = [outputs]
self.assertEqual(len(_UpperCamelCase) , len(self.tool.outputs))
for output, output_type in zip(_UpperCamelCase , self.tool.outputs):
_lowerCamelCase : str = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase))
def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
"""simple docstring"""
_lowerCamelCase : int = create_inputs(self.tool.inputs)
_lowerCamelCase : str = []
for _input, input_type in zip(_UpperCamelCase , self.tool.inputs):
if isinstance(_UpperCamelCase , _UpperCamelCase):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
_lowerCamelCase : Tuple = self.tool(*_UpperCamelCase)
if not isinstance(_UpperCamelCase , _UpperCamelCase):
_lowerCamelCase : int = [outputs]
self.assertEqual(len(_UpperCamelCase) , len(self.tool.outputs))
| 701 | def naive_cut_rod_recursive(n: int, prices: list):
    # plain recursion, exponential time
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
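# Complexity note: naive_cut_rod_recursive is O(2^n); both memoized variants run
# in O(n^2) time with O(n) extra space. A small check by hand, assuming
# prices = [1, 5, 8, 9] (the classic CLRS example):
#
#     bottom_up_cut_rod(4, [1, 5, 8, 9])  # 10, from two pieces of length 2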
| 15 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : str=7 , _UpperCamelCase : Dict=400 , _UpperCamelCase : str=2000 , _UpperCamelCase : str=10 , _UpperCamelCase : Any=160 , _UpperCamelCase : List[Any]=8 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : List[Any]=4000 , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Dict=True , ) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : str = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Optional[int] = min_seq_length
_lowerCamelCase : Optional[Any] = max_seq_length
_lowerCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : Optional[int] = padding_value
_lowerCamelCase : Any = sampling_rate
_lowerCamelCase : Optional[Any] = return_attention_mask
_lowerCamelCase : str = do_normalize
_lowerCamelCase : int = feature_size
_lowerCamelCase : Dict = chunk_length
_lowerCamelCase : Optional[int] = hop_length
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Dict=False , _UpperCamelCase : int=False) ->Union[str, Any]:
"""simple docstring"""
def _flatten(_UpperCamelCase : Optional[Any]):
return list(itertools.chain(*_UpperCamelCase))
if equal_length:
_lowerCamelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
_lowerCamelCase : Union[str, Any] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_lowerCamelCase : Dict = [np.asarray(_UpperCamelCase) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = WhisperFeatureExtractor if is_speech_available() else None
def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
"""simple docstring"""
_lowerCamelCase : int = WhisperFeatureExtractionTester(self)
def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : List[str] = feat_extract_first.save_pretrained(_UpperCamelCase)[0]
check_json_file_has_correct_format(_UpperCamelCase)
_lowerCamelCase : Tuple = self.feature_extraction_class.from_pretrained(_UpperCamelCase)
_lowerCamelCase : Tuple = feat_extract_first.to_dict()
_lowerCamelCase : Any = feat_extract_second.to_dict()
_lowerCamelCase : Optional[Any] = feat_extract_first.mel_filters
_lowerCamelCase : Optional[int] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase))
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[Any] = os.path.join(_UpperCamelCase , """feat_extract.json""")
feat_extract_first.to_json_file(_UpperCamelCase)
_lowerCamelCase : List[str] = self.feature_extraction_class.from_json_file(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = feat_extract_first.to_dict()
_lowerCamelCase : Dict = feat_extract_second.to_dict()
_lowerCamelCase : List[Any] = feat_extract_first.mel_filters
_lowerCamelCase : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase))
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
_lowerCamelCase : List[Any] = [np.asarray(_UpperCamelCase) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase : Tuple = feature_extractor(_UpperCamelCase , padding="""max_length""" , return_tensors="""np""").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
# Test not batched input
_lowerCamelCase : Tuple = feature_extractor(speech_inputs[0] , return_tensors="""np""").input_features
_lowerCamelCase : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""").input_features
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3))
# Test batched
_lowerCamelCase : Dict = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
_lowerCamelCase : Optional[Any] = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3))
# Test 2-D numpy arrays are batched.
_lowerCamelCase : Any = [floats_list((1, x))[0] for x in (800, 800, 800)]
_lowerCamelCase : Tuple = np.asarray(_UpperCamelCase)
_lowerCamelCase : List[str] = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
_lowerCamelCase : List[str] = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3))
# Test truncation required
_lowerCamelCase : Union[str, Any] = [floats_list((1, x))[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200)]
_lowerCamelCase : Optional[int] = [np.asarray(_UpperCamelCase) for speech_input in speech_inputs]
_lowerCamelCase : List[str] = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase : int = [np.asarray(_UpperCamelCase) for speech_input in speech_inputs_truncated]
_lowerCamelCase : Tuple = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
_lowerCamelCase : Dict = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
"""simple docstring"""
import torch
_lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_lowerCamelCase : int = np.random.rand(100 , 32).astype(np.floataa)
_lowerCamelCase : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase : List[str] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""")
self.assertTrue(np_processed.input_features.dtype == np.floataa)
_lowerCamelCase : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""")
self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""")
# automatic decoding with librispeech
_lowerCamelCase : Any = ds.sort("""id""").select(range(_UpperCamelCase))[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
"""simple docstring"""
# fmt: off
_lowerCamelCase : Optional[int] = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
])
# fmt: on
_lowerCamelCase : Union[str, Any] = self._load_datasamples(1)
_lowerCamelCase : Dict = WhisperFeatureExtractor()
_lowerCamelCase : Any = feature_extractor(_UpperCamelCase , return_tensors="""pt""").input_features
self.assertEqual(input_features.shape , (1, 80, 3000))
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _UpperCamelCase , atol=1E-4))
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_lowerCamelCase : List[str] = self._load_datasamples(1)[0]
_lowerCamelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_lowerCamelCase : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_UpperCamelCase)[0]
self.assertTrue(np.all(np.mean(_UpperCamelCase) < 1E-3))
self.assertTrue(np.all(np.abs(np.var(_UpperCamelCase) - 1) < 1E-3))
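# --- Illustrative sketch (not part of the tests above) ---
# The last test checks that `zero_mean_unit_var_norm` brings a badly scaled
# signal to ~0 mean and ~unit variance. A minimal standalone version of that
# transform (a hypothetical helper, plain numpy, no attention mask) behaves
# the same way:
import numpy as np

def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    """Normalize a 1-D signal to zero mean and unit variance."""
    return (x - x.mean()) / np.sqrt(x.var() + eps)

_audio = np.random.rand(16_000).astype(np.float32) * 65_535  # badly scaled input
_normed = zero_mean_unit_var(_audio)
assert abs(_normed.mean()) < 1e-3 and abs(_normed.var() - 1) < 1e-3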
| 702 |
from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Any = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
# actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCAmelCase : List[Any] ={
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A__ ( __A ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A__ ( __A , __A ):
'''simple docstring'''
if args.student_type == "roberta":
_lowerCamelCase : Tuple = False
elif args.student_type == "gpt2":
_lowerCamelCase : Union[str, Any] = False
def A__ ( __A , __A ):
'''simple docstring'''
if args.student_type == "roberta":
_lowerCamelCase : Dict = False
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Any = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=__A , required=__A , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=__A , required=__A , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=__A , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__A , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=__A , required=__A , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=__A , type=__A , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__A , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=__A , required=__A , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=__A , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=__A , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=__A , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=__A , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=__A , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=__A , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=__A , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=__A , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=__A , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=__A , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=__A , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=__A , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=__A , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=__A , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__A , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=__A , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__A , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=__A , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__A , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__A , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=__A , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=__A , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=__A , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=__A , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=__A , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=__A , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=__A , default=4_000 , help="""Checkpoint interval.""" )
_lowerCamelCase : Dict = parser.parse_args()
sanity_checks(__A )
# ARGS #
init_gpu_params(__A )
set_seed(__A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(__A ) , __A , indent=4 )
git_log(args.dump_path )
_lowerCamelCase : List[str] = MODEL_CLASSES[args.student_type]
_lowerCamelCase : int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowerCamelCase : Dict = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowerCamelCase : str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowerCamelCase : Union[str, Any] = tokenizer.all_special_tokens.index(__A )
_lowerCamelCase : Optional[int] = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
_lowerCamelCase : List[str] = special_tok_ids
_lowerCamelCase : int = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , """rb""" ) as fp:
_lowerCamelCase : Dict = pickle.load(__A )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , """rb""" ) as fp:
_lowerCamelCase : int = pickle.load(__A )
_lowerCamelCase : List[str] = np.maximum(__A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowerCamelCase : Tuple = 0.0 # do not predict special tokens
_lowerCamelCase : Optional[Any] = torch.from_numpy(__A )
else:
_lowerCamelCase : int = None
_lowerCamelCase : Tuple = LmSeqsDataset(params=__A , data=__A )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
_lowerCamelCase : Dict = student_config_class.from_pretrained(args.student_config )
_lowerCamelCase : Optional[int] = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
_lowerCamelCase : Any = student_model_class.from_pretrained(args.student_pretrained_weights , config=__A )
else:
_lowerCamelCase : Dict = student_model_class(__A )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info("""Student loaded.""" )
# TEACHER #
_lowerCamelCase : Optional[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__A )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__A , __A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__A , __A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowerCamelCase : Optional[Any] = Distiller(
params=__A , dataset=__A , token_probs=__A , student=__A , teacher=__A )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
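# --- Illustrative sketch: the temperature-scaled distillation loss ---
# The Distiller above combines several objectives weighted by the alpha_*
# flags. The core `alpha_ce` term is a KL divergence between temperature-
# softened teacher and student distributions (Hinton et al., 2015). A
# minimal standalone version (an assumption about the Distiller's
# internals, not its actual code) looks like this:
import torch
import torch.nn.functional as F

def soft_ce_loss(student_logits, teacher_logits, temperature=2.0):
    # Soften both distributions, then rescale by T**2 so gradient
    # magnitudes stay comparable across temperatures.
    log_p_student = F.log_softmax(student_logits / temperature, dim=-1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * temperature**2

_loss = soft_ce_loss(torch.randn(4, 100), torch.randn(4, 100))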
| 703 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
_UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
_lowerCamelCase : Any = Text(
cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
if self.streaming:
_lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
self.builder.download_and_prepare(
download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
_lowerCamelCase : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
return dataset
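# --- Illustrative usage through the public API ---
# The reader above is what backs `datasets.load_dataset("text", ...)`.
# A typical call exercising the same code path (the file path is a
# placeholder) would be:
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
#   print(ds[0]["text"])  # each line of the file becomes one example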
| 15 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""")
_lowerCamelCase : List[str] = BertTokenizer.from_pretrained("""bert-base-uncased""")
_lowerCamelCase : Union[str, Any] = bertabert.config.encoder.vocab_size
_lowerCamelCase : List[Any] = tokenizer.sep_token_id
_lowerCamelCase : Any = tokenizer.cls_token_id
_lowerCamelCase : Optional[Any] = 128
_lowerCamelCase : Dict = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""")
_lowerCamelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""")
_lowerCamelCase : List[Any] = train_dataset.select(range(32))
_lowerCamelCase : Tuple = val_dataset.select(range(16))
_lowerCamelCase : Tuple = 4
def _map_to_encoder_decoder_inputs(_UpperCamelCase : Optional[Any]):
# Tokenizer will automatically set [BOS] <text> [EOS]
_lowerCamelCase : Dict = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_UpperCamelCase , max_length=512)
_lowerCamelCase : Any = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_UpperCamelCase , max_length=128)
_lowerCamelCase : Dict = inputs.input_ids
_lowerCamelCase : str = inputs.attention_mask
_lowerCamelCase : Tuple = outputs.input_ids
_lowerCamelCase : Optional[int] = outputs.input_ids.copy()
_lowerCamelCase : List[str] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_lowerCamelCase : List[str] = outputs.attention_mask
assert all(len(x) == 512 for x in inputs.input_ids)
assert all(len(x) == 128 for x in outputs.input_ids)
return batch
def _compute_metrics(_UpperCamelCase : str):
_lowerCamelCase : List[str] = pred.label_ids
_lowerCamelCase : str = pred.predictions
# all unnecessary tokens are removed
_lowerCamelCase : List[str] = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
return {"accuracy": accuracy}
# map train dataset
_lowerCamelCase : Any = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCamelCase , batch_size=_UpperCamelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
_lowerCamelCase : Tuple = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCamelCase , batch_size=_UpperCamelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
_lowerCamelCase : int = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Dict = SeqaSeqTrainingArguments(
output_dir=_UpperCamelCase , per_device_train_batch_size=_UpperCamelCase , per_device_eval_batch_size=_UpperCamelCase , predict_with_generate=_UpperCamelCase , evaluation_strategy="""steps""" , do_train=_UpperCamelCase , do_eval=_UpperCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_lowerCamelCase : Dict = SeqaSeqTrainer(
model=_UpperCamelCase , args=_UpperCamelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCamelCase , eval_dataset=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# start training
trainer.train()
| 704 |
lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A ):
'''simple docstring'''
if not sequence:
return []
if len(__A ) == 1:
return list(__A )
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = len(__A ) - 1
_lowerCamelCase : str = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid]
high -= 1
else:
_lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(__A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase : List[Any] =open # noqa: we just need to have a builtin inside this module to test it properly
| 705 |
from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( __A , __A ):
'''simple docstring'''
if row >= len(__A ):
solution.append(__A )
printboard(__A )
print()
return True
for i in range(len(__A ) ):
if is_safe(__A , __A , __A ):
_lowerCamelCase : int = 1
solve(__A , row + 1 )
_lowerCamelCase : List[str] = 0
return False
def A__ ( __A ):
'''simple docstring'''
for i in range(len(__A ) ):
for j in range(len(__A ) ):
if board[i][j] == 1:
print("""Q""" , end=""" """ )
else:
print(""".""" , end=""" """ )
print()
# n=int(input("The no. of queens"))
lowerCAmelCase : int =8
lowerCAmelCase : Union[str, Any] =[[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[str]) ->str:
"""simple docstring"""
_lowerCamelCase : Any = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""")
_lowerCamelCase : List[Any] = VideoClassificationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase , top_k=2)
_lowerCamelCase : Tuple = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
for example in examples:
_lowerCamelCase : Any = video_classifier(_UpperCamelCase)
self.assertEqual(
_UpperCamelCase , [
{"""score""": ANY(_UpperCamelCase), """label""": ANY(_UpperCamelCase)},
{"""score""": ANY(_UpperCamelCase), """label""": ANY(_UpperCamelCase)},
] , )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Tuple = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
_lowerCamelCase : Any = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10})
_lowerCamelCase : str = pipeline(
"""video-classification""" , model=_UpperCamelCase , feature_extractor=_UpperCamelCase , frame_sampling_rate=4)
_lowerCamelCase : Dict = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""")
_lowerCamelCase : str = video_classifier(_UpperCamelCase , top_k=2)
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4) , [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}] , )
_lowerCamelCase : List[Any] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4) , [
[{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
[{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
] , )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""simple docstring"""
pass
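# --- Typical public usage of the pipeline under test (illustrative) ---
# Outside the test harness the same pipeline is built via the top-level
# factory; the checkpoint below is the tiny test model already used above,
# and the video path is a placeholder:
#
#   from transformers import pipeline
#   clf = pipeline("video-classification",
#                  model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification")
#   preds = clf("archery.mp4", top_k=2)  # [{"score": ..., "label": ...}, ...]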
| 706 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ={
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A__ ( __A , __A , __A , __A=None ):
'''simple docstring'''
# Initialise PyTorch model
_lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A )
_lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
_lowerCamelCase : int = finetuning_task
_lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCamelCase : int = XLNetForSequenceClassification(__A )
elif "squad" in finetuning_task:
_lowerCamelCase : Dict = finetuning_task
_lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A )
else:
_lowerCamelCase : Any = XLNetLMHeadModel(__A )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__A , __A , __A )
# Save pytorch-model
_lowerCamelCase : Optional[Any] = os.path.join(__A , __A )
_lowerCamelCase : Any = os.path.join(__A , __A )
print(F"""Save PyTorch model to {os.path.abspath(__A )}""" )
torch.save(model.state_dict() , __A )
print(F"""Save configuration file to {os.path.abspath(__A )}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowerCAmelCase : Union[str, Any] =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
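# --- Example invocation (illustrative; paths and script name assumed) ---
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/xlnet_model.ckpt \
#     --xlnet_config_file /path/to/xlnet_config.json \
#     --pytorch_dump_folder_path /path/to/output \
#     --finetuning_task sst-2   # optional: selects the classification head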
| 15 | 0 |
from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Any = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
# actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 707 |
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
for ch in input_str:
_lowerCamelCase : Optional[Any] = ord(__A )
_lowerCamelCase : List[str] = pow(2 , __A )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
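# --- Standalone working version of the bitmap trick (illustrative) ---
# The obfuscation above lost the loop's variable names (`input_str`,
# `bitmap`, ...), so the function no longer runs; the intact idea is to
# record every seen character as one bit of a single integer and bail out
# the moment a bit is already set:
def all_chars_unique(s: str) -> bool:
    bitmap = 0
    for ch in s:
        bit = 1 << ord(ch)  # same as pow(2, ord(ch)) above
        if bitmap & bit:
            return False
        bitmap |= bit
    return True

assert all_chars_unique("abcde") is True
assert all_chars_unique("hello") is False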
| 15 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowerCAmelCase : Tuple ="\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowerCAmelCase : List[Any] ="\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
lowerCAmelCase : Any ="\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
"""simple docstring"""
if version.parse(scb.__version__) < version.parse("""1.4.12"""):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : int = CHRF.CHAR_ORDER , _UpperCamelCase : int = CHRF.WORD_ORDER , _UpperCamelCase : int = CHRF.BETA , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , ) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = len(references[0])
if any(len(_UpperCamelCase) != references_per_prediction for refs in references):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
_lowerCamelCase : str = [[refs[i] for refs in references] for i in range(_UpperCamelCase)]
_lowerCamelCase : List[Any] = CHRF(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = sb_chrf.corpus_score(_UpperCamelCase , _UpperCamelCase)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 708 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
_lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : str = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
| 15 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : str ={
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] =[
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
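# --- Illustrative sketch of the lazy-import pattern (not transformers code) ---
# _LazyModule defers importing the torch-backed submodules until one of
# their attributes is first accessed. The same effect can be sketched with
# PEP 562's module-level __getattr__ (simplified):
#
#   import importlib
#
#   _LAZY = {"TrajectoryTransformerModel": ".modeling_trajectory_transformer"}
#
#   def __getattr__(name):
#       if name in _LAZY:
#           module = importlib.import_module(_LAZY[name], __package__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")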
| 709 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['pixel_values']
def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256}
_lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = do_resize
_lowerCamelCase : int = size
_lowerCamelCase : Optional[int] = resample
_lowerCamelCase : int = do_center_crop
_lowerCamelCase : Optional[Any] = crop_size
_lowerCamelCase : Union[str, Any] = do_rescale
_lowerCamelCase : List[str] = rescale_factor
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return resize(
_UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str:
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image:
"""simple docstring"""
_lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[str] = resample if resample is not None else self.resample
_lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : Optional[Any] = size if size is not None else self.size
_lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = make_list_of_images(_UpperCamelCase)
if not valid_images(_UpperCamelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images]
if do_resize:
_lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images]
if do_center_crop:
_lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images]
if do_rescale:
_lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images]
if do_normalize:
_lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images]
_lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images]
_lowerCamelCase : int = {"""pixel_values""": images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
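# --- Editor's example (not part of the original file; all names below are illustrative) ---
# A minimal, self-contained sketch of the center-crop -> rescale -> normalize chain that
# the preprocess method above delegates to transformers' image utilities.
import numpy as np

def _demo_preprocess(image: np.ndarray, crop: int = 224, mean: float = 0.5, std: float = 0.5) -> np.ndarray:
    top = (image.shape[0] - crop) // 2
    left = (image.shape[1] - crop) // 2
    cropped = image[top : top + crop, left : left + crop]  # center crop
    rescaled = cropped.astype(np.float32) / 255.0  # rescale to [0, 1]
    return (rescaled - mean) / std  # normalize

if __name__ == "__main__":
    print(_demo_preprocess(np.zeros((256, 256, 3), dtype=np.uint8)).shape)  # (224, 224, 3)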
| 15 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase : Tuple =sys.version_info >= (3, 10)
def A__ ( __A=None , __A=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__A )
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 42
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = 42
_snake_case = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = False
_snake_case = True
_snake_case = None
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'titi'
_snake_case = 'toto'
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'titi'
_snake_case = 'toto'
_snake_case = 42
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = 'toto'
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = BasicEnum(self.foo)
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = 'toto'
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = MixedTypeEnum(self.foo)
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = None
_snake_case = field(default=__lowerCAmelCase , metadata={'help': 'help message'} )
_snake_case = None
_snake_case = list_field(default=[] )
_snake_case = list_field(default=[] )
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = list_field(default=[] )
_snake_case = list_field(default=[1, 2, 3] )
_snake_case = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
_snake_case = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = field()
_snake_case = field()
_snake_case = field()
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Any:
"""simple docstring"""
_lowerCamelCase : Dict = BasicEnum(self.required_enum)
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = 42
_snake_case = field()
_snake_case = None
_snake_case = field(default='toto' , metadata={'help': 'help message'} )
_snake_case = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = False
_snake_case = True
_snake_case = None
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = None
_snake_case = field(default=__lowerCAmelCase , metadata={'help': 'help message'} )
_snake_case = None
_snake_case = list_field(default=[] )
_snake_case = list_field(default=[] )
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser) ->Tuple:
"""simple docstring"""
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
_lowerCamelCase : Optional[Any] = {k: v for k, v in vars(_UpperCamelCase).items() if k != """container"""}
_lowerCamelCase : Any = {k: v for k, v in vars(_UpperCamelCase).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , _UpperCamelCase) and yy.get("""choices""" , _UpperCamelCase):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](_UpperCamelCase) , yy["""type"""](_UpperCamelCase))
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : List[Any] = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_UpperCamelCase , required=_UpperCamelCase)
expected.add_argument("""--bar""" , type=_UpperCamelCase , required=_UpperCamelCase)
expected.add_argument("""--baz""" , type=_UpperCamelCase , required=_UpperCamelCase)
expected.add_argument("""--flag""" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="""?""")
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
        (_lowerCamelCase ,) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase)
        self.assertFalse(_lowerCamelCase.flag)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Any = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=_UpperCamelCase)
expected.add_argument("""--baz""" , default="""toto""" , type=_UpperCamelCase , help="""help message""")
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="""?""")
expected.add_argument("""--baz""" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="""?""")
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=_UpperCamelCase , dest="""baz""")
expected.add_argument("""--opt""" , type=_UpperCamelCase , default=_UpperCamelCase)
_lowerCamelCase : List[str] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase)
for dataclass_type in dataclass_types:
_lowerCamelCase : Optional[Any] = HfArgumentParser(_UpperCamelCase)
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = parser.parse_args([])
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase))
_lowerCamelCase : Any = parser.parse_args(["""--foo""", """--no_baz"""])
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase))
_lowerCamelCase : Union[str, Any] = parser.parse_args(["""--foo""", """--baz"""])
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase))
_lowerCamelCase : int = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""])
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase))
_lowerCamelCase : Optional[Any] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""])
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase))
def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
"""simple docstring"""
_lowerCamelCase : int = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : Dict = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42]) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Dict = parser.parse_args([])
self.assertEqual(args.foo , """toto""")
_lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
_lowerCamelCase : Tuple = parser.parse_args(["""--foo""", """titi"""])
self.assertEqual(args.foo , """titi""")
_lowerCamelCase : int = parser.parse_args_into_dataclasses(["""--foo""", """titi"""])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
_lowerCamelCase : List[str] = parser.parse_args(["""--foo""", """42"""])
self.assertEqual(args.foo , 42)
_lowerCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
"""simple docstring"""
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = 'toto'
_lowerCamelCase : List[Any] = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : Tuple = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42]) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Tuple = parser.parse_args([])
self.assertEqual(args.foo , """toto""")
_lowerCamelCase : Any = parser.parse_args(["""--foo""", """titi"""])
self.assertEqual(args.foo , """titi""")
_lowerCamelCase : str = parser.parse_args(["""--foo""", """42"""])
self.assertEqual(args.foo , 42)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : Any = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=_UpperCamelCase)
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=_UpperCamelCase)
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_UpperCamelCase)
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase)
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = parser.parse_args([])
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3]) , )
_lowerCamelCase : Any = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split())
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7]))
def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=_UpperCamelCase , type=_UpperCamelCase)
expected.add_argument("""--bar""" , default=_UpperCamelCase , type=_UpperCamelCase , help="""help message""")
expected.add_argument("""--baz""" , default=_UpperCamelCase , type=_UpperCamelCase)
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=_UpperCamelCase)
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=_UpperCamelCase)
_lowerCamelCase : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase)
for dataclass_type in dataclass_types:
_lowerCamelCase : Tuple = HfArgumentParser(_UpperCamelCase)
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = parser.parse_args([])
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[]))
_lowerCamelCase : List[Any] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split())
self.assertEqual(_UpperCamelCase , Namespace(foo=12 , bar=3.1_4 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3]))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=_UpperCamelCase , required=_UpperCamelCase)
expected.add_argument("""--required_str""" , type=_UpperCamelCase , required=_UpperCamelCase)
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""]) , choices=["""titi""", """toto"""] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_UpperCamelCase , required=_UpperCamelCase)
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""]) , choices=["""titi""", """toto"""] , required=_UpperCamelCase , )
expected.add_argument("""--opt""" , type=_UpperCamelCase , default=_UpperCamelCase)
expected.add_argument("""--baz""" , default="""toto""" , type=_UpperCamelCase , help="""help message""")
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_UpperCamelCase)
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[str] = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : List[Any] = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
_lowerCamelCase : int = parser.parse_dict(_UpperCamelCase)[0]
_lowerCamelCase : List[Any] = BasicExample(**_UpperCamelCase)
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : Tuple = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : List[str] = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Union[str, Any] = os.path.join(_UpperCamelCase , """temp_json""")
os.mkdir(_UpperCamelCase)
with open(temp_local_path + """.json""" , """w+""") as f:
json.dump(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = parser.parse_json_file(Path(temp_local_path + """.json"""))[0]
_lowerCamelCase : Dict = BasicExample(**_UpperCamelCase)
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Tuple = HfArgumentParser(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : int = os.path.join(_UpperCamelCase , """temp_yaml""")
os.mkdir(_UpperCamelCase)
with open(temp_local_path + """.yaml""" , """w+""") as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + """.yaml"""))[0]
_lowerCamelCase : Any = BasicExample(**_UpperCamelCase)
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[int] = HfArgumentParser(_UpperCamelCase)
self.assertIsNotNone(_UpperCamelCase)
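# --- Editor's example (added sketch; the dataclass below is illustrative, not from the source) ---
# End-to-end use of HfArgumentParser as exercised by the tests above: a dataclass becomes a
# CLI, and a bare `--flag` toggles a boolean field to True.
from dataclasses import dataclass
from transformers import HfArgumentParser

@dataclass
class _DemoArgs:
    foo: int
    flag: bool = False

if __name__ == "__main__":
    (_demo_args,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(
        ["--foo", "7", "--flag"], look_for_args_file=False
    )
    print(_demo_args)  # _DemoArgs(foo=7, flag=True)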
| 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( Protocol ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : float) ->float:
"""simple docstring"""
return 0.0
def get_bounds( fft_results , samplerate ):
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type , samplerate ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("""Gain (dB)""" )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type , samplerate ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("""Phase shift (Radians)""" )
    plt.plot(np.unwrap(phases , -2 * pi ) )
    plt.show()
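# --- Editor's sketch (added; not from the source) ---
# The core of show_frequency_response without the plotting: feed a unit impulse through
# the filter, zero-pad to the sample rate, FFT, convert to dB. The identity filter below
# is illustrative; a real filter would shape the spectrum.
import numpy as _np

class _IdentityFilter:
    def process(self, sample: float) -> float:
        return sample  # pass-through

def _impulse_response_db(filt, samplerate: int = 48000, size: int = 512):
    impulse = [1.0] + [0.0] * (size - 1)
    out = [filt.process(s) for s in impulse] + [0.0] * (samplerate - size)
    return 20 * _np.log10(_np.abs(_np.fft.fft(out)) + 1e-12)  # +eps avoids log(0)

if __name__ == "__main__":
    print(_impulse_response_db(_IdentityFilter())[:4])  # ~0 dB everywhere: flat response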
| 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : List[Any] ={
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
lowerCAmelCase : Optional[Any] =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
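# --- Editor's sketch (illustrative; not the transformers implementation) ---
# The _LazyModule pattern above defers heavy imports until first attribute access.
# A minimal stand-alone equivalent uses a module-level __getattr__ (PEP 562):
import importlib

_demo_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    for _module, _symbols in _demo_import_structure.items():
        if name in _symbols:
            return getattr(importlib.import_module(_module), name)
    raise AttributeError(name)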
| 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A__ ( __A , __A , __A , __A , __A , __A , __A , __A=False , ):
'''simple docstring'''
output_path.parent.mkdir(parents=__A , exist_ok=__A )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , use_external_data_format=__A , enable_onnx_checker=__A , opset_version=__A , )
else:
export(
__A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , opset_version=__A , )
@torch.no_grad()
def A__ ( __A , __A , __A , __A = False ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
_lowerCamelCase : str = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
_lowerCamelCase : List[str] = """cpu"""
_lowerCamelCase : Dict = Path(__A )
# VAE DECODER
_lowerCamelCase : Optional[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" )
_lowerCamelCase : List[str] = vae_decoder.config.latent_channels
# forward only through the decoder part
_lowerCamelCase : Tuple = vae_decoder.decode
onnx_export(
__A , model_args=(
torch.randn(1 , __A , 25 , 25 ).to(device=__A , dtype=__A ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__A , )
del vae_decoder
if __name__ == "__main__":
lowerCAmelCase : Optional[int] =argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
lowerCAmelCase : Optional[Any] =parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 15 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCAmelCase : Union[str, Any] =logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , *_UpperCamelCase : Dict , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Optional[int]=None , **_UpperCamelCase : Union[str, Any]) ->int:
"""simple docstring"""
super().__init__(*_UpperCamelCase , **_UpperCamelCase)
_lowerCamelCase : Optional[Any] = eval_examples
_lowerCamelCase : Dict = post_process_function
_lowerCamelCase : int = quant_trainer_args
_lowerCamelCase : Union[str, Any] = 128 # default number of calibration samples
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Optional[int]=None) ->Optional[int]:
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""")
_lowerCamelCase : Union[str, Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
_lowerCamelCase : str = self._remove_unused_columns(_UpperCamelCase , description="""Calibration""")
return DataLoader(
_UpperCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : int=None) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset
_lowerCamelCase : Optional[int] = self.get_calib_dataloader(_UpperCamelCase)
_lowerCamelCase : List[str] = self.model
quant_trainer.configure_model(_UpperCamelCase , self.quant_trainer_args , calib=_UpperCamelCase)
model.eval()
quant_trainer.enable_calibration(_UpperCamelCase)
logger.info("""***** Running calibration *****""")
logger.info(F""" Num examples = {self.calib_num}""")
logger.info(F""" Batch size = {calib_dataloader.batch_size}""")
for step, inputs in enumerate(_UpperCamelCase):
# Prediction step
_lowerCamelCase : List[Any] = self.prediction_step(_UpperCamelCase , _UpperCamelCase , prediction_loss_only=_UpperCamelCase)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_UpperCamelCase , self.quant_trainer_args)
_lowerCamelCase : Any = model
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : str = "eval") ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
_lowerCamelCase : Dict = self.get_eval_dataloader(_UpperCamelCase)
_lowerCamelCase : int = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_lowerCamelCase : Dict = self.compute_metrics
_lowerCamelCase : List[str] = None
_lowerCamelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowerCamelCase : List[Any] = eval_loop(
_UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCamelCase , )
finally:
_lowerCamelCase : List[Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_lowerCamelCase : Dict = self.post_process_function(_UpperCamelCase , _UpperCamelCase , output.predictions)
_lowerCamelCase : int = self.compute_metrics(_UpperCamelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
_lowerCamelCase : List[str] = metrics.pop(_UpperCamelCase)
self.log(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
_lowerCamelCase : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCamelCase)
return metrics
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : str = "test") ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = self.get_test_dataloader(_UpperCamelCase)
# Temporarily disable metric computation, we will do it in the loop here.
_lowerCamelCase : Optional[int] = self.compute_metrics
_lowerCamelCase : Dict = None
_lowerCamelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_lowerCamelCase : Union[str, Any] = eval_loop(
_UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCamelCase , )
finally:
_lowerCamelCase : Tuple = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_lowerCamelCase : Optional[Any] = self.post_process_function(_UpperCamelCase , _UpperCamelCase , output.predictions , """predict""")
_lowerCamelCase : Union[str, Any] = self.compute_metrics(_UpperCamelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
_lowerCamelCase : List[Any] = metrics.pop(_UpperCamelCase)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : List[str]="./") ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.eval_dataset
_lowerCamelCase : List[Any] = self.get_eval_dataloader(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = next(iter(_UpperCamelCase))
# saving device - to make it consistent
_lowerCamelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
# convert to tuple
_lowerCamelCase : Optional[Any] = tuple(v.to(_UpperCamelCase) for k, v in batch.items())
logger.info("""Converting model to be onnx compatible""")
from pytorch_quantization.nn import TensorQuantizer
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Union[str, Any] = self.model.to(_UpperCamelCase)
model.eval()
model.float()
_lowerCamelCase : Any = model.module if hasattr(_UpperCamelCase , """module""") else model
quant_trainer.configure_model(_UpperCamelCase , self.quant_trainer_args)
_lowerCamelCase : Tuple = os.path.join(_UpperCamelCase , """model.onnx""")
logger.info(F"""exporting model to {output_model_file}""")
_lowerCamelCase : List[str] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , export_params=_UpperCamelCase , opset_version=13 , do_constant_folding=_UpperCamelCase , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=_UpperCamelCase , )
logger.info("""onnx export finished""")
| 712 | from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300 # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc , acceptor_conc , intrinsic_conc , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
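# --- Editor's worked example (added; dopant values are illustrative) ---
# For Nd = Na = 1e17 cm^-3 and ni = 1.5e10 cm^-3 (silicon at 300 K), the built-in
# potential kT/q * ln(Nd * Na / ni^2) comes out to roughly 0.81 V:
if __name__ == "__main__":
    print(builtin_voltage(donor_conc=1E17, acceptor_conc=1E17, intrinsic_conc=1.5E10))  # ~0.81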
| 15 | 0 |
red = 0 # The first color of the flag.
white = 1 # The second color of the flag.
blue = 2 # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence ):
    '''simple docstring'''
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg)
    return sequence
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 713 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    '''simple docstring'''
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def A__ ( __A ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def A__ ( ):
'''simple docstring'''
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
_lowerCamelCase : List[str] = [1, 2, 3]
with pytest.raises(__A ):
with parallel_backend("""unsupported backend""" ):
map_nested(__A , __A , num_proc=2 )
with pytest.raises(__A ):
with parallel_backend("""unsupported backend""" ):
map_nested(__A , __A , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = [1, 2]
_lowerCamelCase : str = {"""a""": 1, """b""": 2}
_lowerCamelCase : Dict = {"""a""": [1, 2], """b""": [3, 4]}
_lowerCamelCase : Tuple = {"""a""": {"""1""": 1}, """b""": 2}
_lowerCamelCase : Any = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
_lowerCamelCase : Dict = [2, 3]
_lowerCamelCase : str = {"""a""": 2, """b""": 3}
_lowerCamelCase : Tuple = {"""a""": [2, 3], """b""": [4, 5]}
_lowerCamelCase : Union[str, Any] = {"""a""": {"""1""": 2}, """b""": 3}
_lowerCamelCase : List[Any] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
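# --- Editor's sketch (reference semantics only; not the datasets implementation) ---
# map_nested applies a function through arbitrarily nested lists and dicts. A minimal
# pure-Python equivalent, for intuition about the expected values asserted above:
def _map_nested(fn, obj):
    if isinstance(obj, dict):
        return {k: _map_nested(fn, v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(_map_nested(fn, v) for v in obj)
    return fn(obj)

if __name__ == "__main__":
    print(_map_nested(lambda x: x + 1, {"a": [1, 2], "b": 3}))  # {'a': [2, 3], 'b': 4}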
| 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = IFPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
return self._get_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
_lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _start_torch_memory_measurement( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
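# --- Editor's sketch (added usage pattern; guarded so it also runs without a GPU) ---
# The helper above resets CUDA memory statistics so each pipeline stage's peak can be
# asserted in isolation. Typical use around a region of interest:
if __name__ == "__main__" and torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    _x = torch.zeros(1024, 1024, device="cuda")
    print(torch.cuda.max_memory_allocated())  # peak bytes allocated since the reset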
| 15 | 0 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( __A , __A , __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = TapasConfig.from_json_file(__A )
# set absolute/relative position embeddings parameter
_lowerCamelCase : List[str] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_lowerCamelCase : Optional[int] = TapasForQuestionAnswering(config=__A )
elif task == "WTQ":
# run_task_main.py hparams
_lowerCamelCase : int = 4
_lowerCamelCase : Union[str, Any] = True
# hparam_utils.py hparams
_lowerCamelCase : Any = 0.664_694
_lowerCamelCase : int = 0.207_951
_lowerCamelCase : Union[str, Any] = 0.121_194
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = True
_lowerCamelCase : List[str] = False
_lowerCamelCase : Tuple = 0.0_352_513
_lowerCamelCase : Tuple = TapasForQuestionAnswering(config=__A )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_lowerCamelCase : Dict = 4
_lowerCamelCase : List[str] = False
# hparam_utils.py hparams
_lowerCamelCase : str = 36.4_519
_lowerCamelCase : Any = 0.903_421
_lowerCamelCase : int = 222.088
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Any = 0.763_141
_lowerCamelCase : List[str] = TapasForQuestionAnswering(config=__A )
elif task == "TABFACT":
_lowerCamelCase : List[str] = TapasForSequenceClassification(config=__A )
elif task == "MLM":
_lowerCamelCase : int = TapasForMaskedLM(config=__A )
elif task == "INTERMEDIATE_PRETRAINING":
_lowerCamelCase : Union[str, Any] = TapasModel(config=__A )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__A , __A , __A )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__A )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
_lowerCamelCase : List[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(__A )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase : List[str] =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
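# --- Editor's sketch (added; an equivalent formulation, class names shown as strings) ---
# The task -> model-class dispatch inside convert_tf_checkpoint_to_pytorch can also be
# read as a lookup table, which makes the supported tasks explicit at a glance:
_TASK_TO_MODEL = {
    "SQA": "TapasForQuestionAnswering",
    "WTQ": "TapasForQuestionAnswering",
    "WIKISQL_SUPERVISED": "TapasForQuestionAnswering",
    "TABFACT": "TapasForSequenceClassification",
    "MLM": "TapasForMaskedLM",
    "INTERMEDIATE_PRETRAINING": "TapasModel",
}

def _model_name_for(task: str) -> str:
    try:
        return _TASK_TO_MODEL[task]
    except KeyError:
        raise ValueError(F"""Task {task} not supported.""")

if __name__ == "__main__":
    print(_model_name_for("WTQ"))  # TapasForQuestionAnswering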
| 715 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
_lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
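# --- Editor's worked example (added; default swin-tiny values) ---
# The hidden_size computed in __init__ doubles the embedding dim once per stage after the
# first; with embed_dim=96 and four stages this yields the familiar 768:
_embed_dim, _depths = 96, [2, 2, 6, 2]
print(int(_embed_dim * 2 ** (len(_depths) - 1)))  # 768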
| 15 | 0 |
lowerCAmelCase : Optional[int] =[0, 2, 4, 6, 8]
lowerCAmelCase : Tuple =[1, 3, 5, 7, 9]
def A__ ( __A , __A , __A , __A ):
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
_lowerCamelCase : Union[str, Any] = 0
for digit in range(10 ):
_lowerCamelCase : Optional[int] = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , __A , __A )
return result
_lowerCamelCase : Optional[Any] = 0
for digita in range(10 ):
_lowerCamelCase : Tuple = digita
if (remainder + digita) % 2 == 0:
_lowerCamelCase : Tuple = ODD_DIGITS
else:
_lowerCamelCase : Optional[int] = EVEN_DIGITS
for digita in other_parity_digits:
_lowerCamelCase : Tuple = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , __A , __A , )
return result
def A__ ( __A = 9 ):
'''simple docstring'''
_lowerCamelCase : List[str] = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(__A , 0 , [0] * length , __A )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 716 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
_snake_case = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : Any = torch.manual_seed(0)
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 15 | 0 |
import qiskit
def quantum_entanglement( qubits = 2 ):
    '''Prepare a GHZ-style entangled state on `qubits` qubits and return the measurement counts.'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Measuring any one qubit now collapses the superposition, so every other
    # qubit is found in the same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 717 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer( PreTrainedTokenizer ):
    '''Character-level tokenizer for MGP-STR scene-text recognition.'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ):
        return len(self.vocab)
    def get_vocab( self ):
        return dict(self.vocab , **self.added_tokens_encoder)
    def _tokenize( self , text ):
        # A string is tokenized into its individual characters.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token))
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index)
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) ->Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file , "w" , encoding="utf-8") as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False) + "\n")
        return (vocab_file,)
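# Usage sketch (illustrative; assumes hub access to the "alibaba-damo/mgp-str-base"
# checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP above). The tokenizer is
# character-level, so a word round-trips to its individual characters:
#
#   tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tok("ticket")["input_ids"]
#   assert tok.convert_ids_to_tokens(ids) == list("ticket")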
| 15 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc , acceptor_conc , intrinsic_conc , ):
    '''Built-in voltage of a p-n junction from donor, acceptor and intrinsic concentrations.'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
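    # Worked example (added for illustration; values are typical for silicon at
    # T = 300 K, all concentrations in the same units, e.g. cm^-3, since only
    # ratios matter): ln(1e17 * 1e17 / (1.5e10) ** 2) ~= 31.4, times
    # kT/q ~= 0.0259 V, gives a built-in voltage of roughly 0.81 V.
    print(f"{builtin_voltage(1e17, 1e17, 1.5e10) = :.2f} V")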
| 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester( unittest.TestCase ):
    def test_get_aligned_output_features_output_indices( self ):
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names)
        self.assertEqual(out_features , ["c"])
        self.assertEqual(out_indices , [2])
        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(["a", "c"] , None , stage_names)
        self.assertEqual(out_features , ["a", "c"])
        self.assertEqual(out_indices , [0, 2])
        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names)
        self.assertEqual(out_features , ["a", "c"])
        self.assertEqual(out_indices , [0, 2])
        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names)
        self.assertEqual(out_features , ["a", "c"])
        self.assertEqual(out_indices , [-3, -1])
    def test_verify_out_features_out_indices( self ):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None , 0 , ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None , (0, 1) , ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"])
    def test_backbone_mixin( self ):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"])
        self.assertEqual(backbone.out_indices , [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"])
        self.assertEqual(backbone.out_indices , [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"])
        self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 0 |
def binomial_coefficient( n , k ):
    '''Compute C(n, k) iteratively.'''
    result = 1  # holds the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count ):
    '''Number of distinct binary-tree shapes (and BSTs) on `node_count` nodes.'''
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n ):
    '''Iterative factorial; raises on negative input.'''
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count ):
    '''Number of labelled binary trees on `node_count` nodes.'''
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
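# Quick cross-checks (added for illustration): the closed form above should
# reproduce the first Catalan numbers, and on 3 nodes there are 5 shapes
# times 3! labelings.
assert [catalan_number(i) for i in range(6)] == [1, 1, 2, 5, 14, 42]
assert binary_tree_count(3) == catalan_number(3) * factorial(3) == 30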
| 719 | import math
def is_prime( number ):
    '''Trial-division primality test for non-negative integers.'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and non-negative"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime( value , factor=1 , **kwargs ):
    '''Return the first prime reached from factor * value, never the start itself; pass desc=True to search downwards.'''
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
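# Example behaviour (a small illustration of the API above): counting up from
# 14 reaches 17, while desc=True counts down and reaches 13.
if __name__ == "__main__":
    assert next_prime(14) == 17
    assert next_prime(14, desc=True) == 13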
| 15 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments( TrainingArguments ):
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )
    def to_dict( self ):
        '''Serialize, converting any GenerationConfig value to a plain dict.'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig):
                d[k] = v.to_dict()
        return d
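# Usage sketch (illustrative; "out" is a placeholder output directory): these
# fields are what Seq2SeqTrainer consumes when generation-based metrics are
# wanted during evaluation.
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )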
| 720 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline( Pipeline ):
    '''Pipeline that assigns class labels to images.'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs)
        requires_backends(self , "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters( self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs)
    def preprocess( self , image ):
        image = load_image(image)
        model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores , ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1)[0]
            topk = tf.math.top_k(probs , k=top_k)
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
| 15 | 0 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number( number ):
    '''Sum of the squares of the decimal digits of `number`.'''
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 chains made:
# one ends with 89, with the chain member 58 being the one which, when marked
# first, leaves the fewest iterations for all the members to be checked;
# the other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be marked at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True    # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain( number ):
    '''True if the chain starting at `number` ends at 1, False if it ends at 89.'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number = 10_000_000 ):
    '''Count how many starting numbers below `number` arrive at 89.'''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 721 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
        _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester( unittest.TestCase ):
    def test_get_aligned_output_features_output_indices( self ):
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names)
        self.assertEqual(out_features , ["c"])
        self.assertEqual(out_indices , [2])
        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(["a", "c"] , None , stage_names)
        self.assertEqual(out_features , ["a", "c"])
        self.assertEqual(out_indices , [0, 2])
        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names)
        self.assertEqual(out_features , ["a", "c"])
        self.assertEqual(out_indices , [0, 2])
        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names)
        self.assertEqual(out_features , ["a", "c"])
        self.assertEqual(out_indices , [-3, -1])
    def test_verify_out_features_out_indices( self ):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None , 0 , ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None , (0, 1) , ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"])
    def test_backbone_mixin( self ):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"])
        self.assertEqual(backbone.out_indices , [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"])
        self.assertEqual(backbone.out_indices , [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"])
        self.assertEqual(backbone.out_indices , [-3, -1])
| 700 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ):
    '''Parse the launcher's flags plus everything destined for the training script.'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main( ):
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
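# Example invocation (illustrative; assumes this file is saved as xla_spawn.py
# and my_training_script.py is a placeholder that defines _mp_fn):
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5
#
# Everything after the script path is forwarded to the training script, with
# --tpu_num_cores appended via the sys.argv patch above.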
| 15 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 701 | def naive_cut_rod_recursive( n , prices ):
    '''Exponential-time recursive rod cutting, with no memoization.'''
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revenue = float("-inf" )
    for i in range(1 , n + 1 ):
        max_revenue = max(
            max_revenue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revenue
def top_down_cut_rod( n , prices ):
    '''Memoized (top-down) dynamic-programming rod cutting.'''
    _enforce_args(n , prices )
    max_rev = [float("-inf" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive( n , prices , max_rev ):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf" )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod( n , prices ):
    '''Bottom-up dynamic-programming rod cutting.'''
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args( n , prices ):
    if n < 0:
        msg = F"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            F"""Got n = {n} but length of prices = {len(prices )}"""
        )
        raise ValueError(msg )
def main( ):
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1, resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
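# Cross-check on the classic CLRS instance (added for illustration): with
# prices [1, 5, 8, 9, 10, 17, 17, 20], a rod of length 8 is best cut into
# pieces of lengths 2 and 6 for a revenue of 22.
if __name__ == "__main__":
    assert bottom_up_cut_rod(8, [1, 5, 8, 9, 10, 17, 17, 20]) == 22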
| 15 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase : int ="platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def A__ ( __A , __A , __A=None , __A=None , __A=None , __A=None , __A=None , __A=None , ):
'''simple docstring'''
if attention_mask is None:
_lowerCamelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowerCamelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowerCamelCase : str = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCamelCase : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCamelCase : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __snake_case :
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict=13 , _UpperCamelCase : int=7 , _UpperCamelCase : str=True , _UpperCamelCase : List[str]=False , _UpperCamelCase : int=99 , _UpperCamelCase : Union[str, Any]=16 , _UpperCamelCase : Any=2 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any=32 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : str=1 , _UpperCamelCase : Any=0 , _UpperCamelCase : int=0.0_2 , ) ->List[str]:
"""simple docstring"""
_lowerCamelCase : int = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : Union[str, Any] = is_training
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : List[Any] = eos_token_id
_lowerCamelCase : List[Any] = pad_token_id
_lowerCamelCase : List[Any] = bos_token_id
_lowerCamelCase : Tuple = initializer_range
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
_lowerCamelCase : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
_lowerCamelCase : List[Any] = shift_tokens_right(_UpperCamelCase , 1 , 2)
_lowerCamelCase : Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCamelCase , )
_lowerCamelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[int] = 20
_lowerCamelCase : Tuple = model_class_name(_UpperCamelCase)
_lowerCamelCase : Any = model.encode(inputs_dict["""input_ids"""])
_lowerCamelCase : int = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCamelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
_lowerCamelCase : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCamelCase : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
_lowerCamelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
_lowerCamelCase : int = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , )
_lowerCamelCase : Dict = model.decode(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = 20
_lowerCamelCase : Tuple = model_class_name(_UpperCamelCase)
_lowerCamelCase : Any = model.encode(inputs_dict["""input_ids"""])
_lowerCamelCase : List[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCamelCase : List[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_lowerCamelCase : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCamelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
_lowerCamelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
_lowerCamelCase : int = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
_lowerCamelCase : Tuple = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase)
_lowerCamelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
@require_flax
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = 99
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : int = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_lowerCamelCase : List[Any] = input_ids.shape[0]
_lowerCamelCase : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : str = self._get_config_and_data()
_lowerCamelCase : Any = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase)
_lowerCamelCase : List[Any] = lm_model(input_ids=_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_lowerCamelCase : Optional[int] = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase)
_lowerCamelCase : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
_lowerCamelCase : Tuple = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
_lowerCamelCase : Dict = lm_model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase)
_lowerCamelCase : Any = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
_lowerCamelCase : int = shift_tokens_right(_UpperCamelCase , 1 , 2)
_lowerCamelCase : Tuple = np.equal(_UpperCamelCase , 1).astype(np.floataa).sum()
_lowerCamelCase : Dict = np.equal(_UpperCamelCase , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(_UpperCamelCase , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class __snake_case ( __lowerCAmelCase , unittest.TestCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = True
_snake_case = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_snake_case = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _SCREAMING_SNAKE_CASE ( self : Any) ->Any:
"""simple docstring"""
_lowerCamelCase : Any = FlaxBlenderbotModelTester(self)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowerCamelCase : Tuple = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model_class(_UpperCamelCase)
@jax.jit
def encode_jitted(_UpperCamelCase : str , _UpperCamelCase : Any=None , **_UpperCamelCase : Dict):
return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase)
with self.subTest("""JIT Enabled"""):
_lowerCamelCase : Dict = encode_jitted(**_UpperCamelCase).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
_lowerCamelCase : str = encode_jitted(**_UpperCamelCase).to_tuple()
self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase))
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase):
self.assertEqual(jitted_output.shape , output.shape)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowerCamelCase : Any = model_class(_UpperCamelCase)
_lowerCamelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
_lowerCamelCase : Union[str, Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any]):
return model.decode(
decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , )
with self.subTest("""JIT Enabled"""):
_lowerCamelCase : Dict = decode_jitted(**_UpperCamelCase).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
_lowerCamelCase : Optional[int] = decode_jitted(**_UpperCamelCase).to_tuple()
self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase))
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowerCamelCase : Any = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowerCamelCase : Dict = np.ones((1, 1)) * model.config.eos_token_id
_lowerCamelCase : int = model(_UpperCamelCase)
self.assertIsNotNone(_UpperCamelCase)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
@slow
def _SCREAMING_SNAKE_CASE ( self : int) ->str:
"""simple docstring"""
_lowerCamelCase : Any = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
_lowerCamelCase : Dict = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
_lowerCamelCase : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
_lowerCamelCase : List[Any] = ["""Sam"""]
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase , return_tensors="""jax""")
_lowerCamelCase : Optional[int] = model.generate(**_UpperCamelCase , **_UpperCamelCase)
_lowerCamelCase : Dict = """Sam is a great name. It means \"sun\" in Gaelic."""
_lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(_UpperCamelCase , **_UpperCamelCase)
assert generated_txt[0].strip() == tgt_text
| 702 | from __future__ import annotations
class XORCipher:
    def __init__( self , key = 0 ):
        '''Simple symmetric XOR cipher; a default key can be stored for reuse.'''
        self.__key = key
    def encrypt( self , content , key ):
        '''Encrypt `content`, returning a list of cipher characters.'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ):
        '''Decrypt a list of cipher characters, returning a list of plain characters.'''
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self , content , key = 0 ):
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ):
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self , file , key = 0 ):
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("encrypt.out" , "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ))
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ):
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("decrypt.out" , "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 0 |
def merge_sort( collection ):
    '''Pure-Python merge sort; returns a new sorted list.'''
    def merge( left , right ):
        '''Merge two sorted runs, popping the smaller head each time.'''
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
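# Small property checks (added for illustration): the `<=` in the merge step
# pops from the left run on ties, which is what makes the sort stable.
assert merge_sort([3, 1, 2, 1]) == [1, 1, 2, 3]
assert merge_sort([]) == []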
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : Dict =input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase : Dict =[int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
_UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
_lowerCamelCase : Any = Text(
cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
if self.streaming:
_lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
        else:
            # Build regular (map-style) dataset
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
self.builder.download_and_prepare(
download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
_lowerCamelCase : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
return dataset
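# Note (assumption): this reader mirrors `datasets.io.text.TextDatasetReader`;
# it is normally reached via `datasets.load_dataset("text", data_files=...)`
# rather than instantiated directly.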
| 15 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A__ ( __A , __A , __A , __A , __A=True , __A="pt" ):
'''simple docstring'''
_lowerCamelCase : int = {"""add_prefix_space""": True} if isinstance(__A , __A ) and not line.startswith(""" """ ) else {}
_lowerCamelCase : List[Any] = padding_side
return tokenizer(
[line] , max_length=__A , padding="""max_length""" if pad_to_max_length else None , truncation=__A , return_tensors=__A , add_special_tokens=__A , **__A , )
def A__ ( __A , __A , __A=None , ):
'''simple docstring'''
_lowerCamelCase : List[str] = input_ids.ne(__A ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any="train" , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : int=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Any="" , ) ->str:
"""simple docstring"""
super().__init__()
_lowerCamelCase : str = Path(_UpperCamelCase).joinpath(type_path + """.source""")
_lowerCamelCase : Any = Path(_UpperCamelCase).joinpath(type_path + """.target""")
_lowerCamelCase : str = self.get_char_lens(self.src_file)
_lowerCamelCase : str = max_source_length
_lowerCamelCase : Optional[int] = max_target_length
assert min(self.src_lens) > 0, F"""found empty line in {self.src_file}"""
_lowerCamelCase : str = tokenizer
_lowerCamelCase : List[str] = prefix
if n_obs is not None:
_lowerCamelCase : Tuple = self.src_lens[:n_obs]
_lowerCamelCase : Dict = src_lang
_lowerCamelCase : str = tgt_lang
def __len__( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Optional[int] , _UpperCamelCase : str) ->Dict[str, torch.Tensor]:
"""simple docstring"""
_lowerCamelCase : Tuple = index + 1 # linecache starts at 1
_lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) , _UpperCamelCase).rstrip("""\n""")
_lowerCamelCase : int = linecache.getline(str(self.tgt_file) , _UpperCamelCase).rstrip("""\n""")
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _UpperCamelCase):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowerCamelCase : Any = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCamelCase) else self.tokenizer
)
_lowerCamelCase : Tuple = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCamelCase) else self.tokenizer
_lowerCamelCase : Union[str, Any] = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_source_length , """right""")
_lowerCamelCase : Optional[int] = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_target_length , """right""")
_lowerCamelCase : Any = source_inputs["""input_ids"""].squeeze()
_lowerCamelCase : Tuple = target_inputs["""input_ids"""].squeeze()
_lowerCamelCase : Dict = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _SCREAMING_SNAKE_CASE ( _UpperCamelCase : int) ->Optional[Any]:
"""simple docstring"""
return [len(_UpperCamelCase) for x in Path(_UpperCamelCase).open().readlines()]
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : int) ->Dict[str, torch.Tensor]:
"""simple docstring"""
_lowerCamelCase : Dict = torch.stack([x["""input_ids"""] for x in batch])
_lowerCamelCase : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch])
_lowerCamelCase : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch])
_lowerCamelCase : Tuple = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase)
else self.tokenizer.pad_token_id
)
_lowerCamelCase : Dict = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase)
else self.tokenizer.pad_token_id
)
_lowerCamelCase : Optional[Any] = trim_batch(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = trim_batch(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase)
_lowerCamelCase : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
lowerCAmelCase : Dict = getLogger(__name__)
def A__ ( __A ):
'''simple docstring'''
return list(itertools.chain.from_iterable(__A ) )
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : str = get_git_info()
save_json(__A , os.path.join(__A , """git_log.json""" ) )
def A__ ( __A , __A , __A=4 , **__A ):
'''simple docstring'''
with open(__A , """w""" ) as f:
json.dump(__A , __A , indent=__A , **__A )
def A__ ( __A ):
'''simple docstring'''
with open(__A ) as f:
return json.load(__A )
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = git.Repo(search_parent_directories=__A )
_lowerCamelCase : Any = {
"""repo_id""": str(__A ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def A__ ( __A , __A ):
'''simple docstring'''
return list(map(__A , __A ) )
def A__ ( __A , __A ):
'''simple docstring'''
with open(__A , """wb""" ) as f:
return pickle.dump(__A , __A )
def A__ ( __A ):
'''simple docstring'''
def remove_articles(__A ):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , __A )
def white_space_fix(__A ):
return " ".join(text.split() )
def remove_punc(__A ):
_lowerCamelCase : str = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : int = normalize_answer(__A ).split()
_lowerCamelCase : Optional[int] = normalize_answer(__A ).split()
_lowerCamelCase : int = Counter(__A ) & Counter(__A )
_lowerCamelCase : Optional[Any] = sum(common.values() )
if num_same == 0:
return 0
_lowerCamelCase : Union[str, Any] = 1.0 * num_same / len(__A )
_lowerCamelCase : Tuple = 1.0 * num_same / len(__A )
_lowerCamelCase : Tuple = (2 * precision * recall) / (precision + recall)
return fa
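# Worked example for the token-level F1 above (illustrative; follows the
# intended upstream semantics of f1_score(prediction, ground_truth)):
# prediction "the cat sat" -> ["cat", "sat"] once normalize_answer drops the article
# gold "cat sat down"      -> ["cat", "sat", "down"]
# num_same = 2, precision = 2 / 2 = 1.0, recall = 2 / 3,
# F1 = 2 * 1.0 * (2 / 3) / (1.0 + 2 / 3) = 0.8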
def A__ ( __A , __A ):
'''simple docstring'''
return normalize_answer(__A ) == normalize_answer(__A )
def A__ ( __A , __A ):
'''simple docstring'''
assert len(__A ) == len(__A )
_lowerCamelCase : Tuple = 0
for hypo, pred in zip(__A , __A ):
em += exact_match_score(__A , __A )
if len(__A ) > 0:
em /= len(__A )
return {"em": em}
def A__ ( __A ):
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def A__ ( __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : List[str] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_lowerCamelCase : int = """dropout_rate"""
for p in extra_params:
if getattr(__A , __A , __A ):
if not hasattr(__A , __A ) and not hasattr(__A , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(__A ) )
delattr(__A , __A )
continue
_lowerCamelCase : str = p if hasattr(__A , __A ) else equivalent_param[p]
setattr(__A , __A , getattr(__A , __A ) )
delattr(__A , __A )
return hparams, config
| 704 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A ):
'''simple docstring'''
if not sequence:
return []
if len(__A ) == 1:
return list(__A )
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = len(__A ) - 1
_lowerCamelCase : str = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid]
high -= 1
else:
            _lowerCamelCase : int = F"""The elements inside the sequence must contain only {colors} values"""
raise ValueError(__A )
return sequence
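# Worked trace (illustrative) on [2, 0, 1] with colors (0, 1, 2):
#   low=0 mid=0 high=2  [2, 0, 1]  sequence[mid] == 2 -> swap with high: [1, 0, 2], high=1
#   low=0 mid=0 high=1  [1, 0, 2]  sequence[mid] == 1 -> mid=1
#   low=0 mid=1 high=1  [1, 0, 2]  sequence[mid] == 0 -> swap with low: [0, 1, 2], low=1, mid=2
#   mid > high, so the loop stops with [0, 1, 2]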
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 0 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : List[Any] =get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = GPTSwaTokenizer
_snake_case = False
_snake_case = True
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : Dict = GPTSwaTokenizer(_UpperCamelCase , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""")
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : List[str]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = """This is a test"""
_lowerCamelCase : Any = """This is a test"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
_lowerCamelCase : List[str] = """<s>"""
_lowerCamelCase : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase) , _UpperCamelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase) , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """j""")
self.assertEqual(len(_UpperCamelCase) , 2000)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2000)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = GPTSwaTokenizer(_UpperCamelCase)
_lowerCamelCase : Optional[int] = tokenizer.tokenize("""This is a test""")
self.assertListEqual(_UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase) , [465, 287, 265, 631, 842])
_lowerCamelCase : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
# fmt: off
self.assertListEqual(
_UpperCamelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_tokens_to_ids(_UpperCamelCase)
self.assertListEqual(
_UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
_lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase)
# fmt: off
self.assertListEqual(
_UpperCamelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""])
# fmt: on
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = GPTSwaTokenizer(_UpperCamelCase)
_lowerCamelCase : Optional[int] = ["""This is a test""", """I was born in 92000, and this is falsé."""]
_lowerCamelCase : Optional[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_UpperCamelCase , _UpperCamelCase):
self.assertListEqual(tokenizer.encode_fast(_UpperCamelCase) , _UpperCamelCase)
# Test that decode_fast returns the input text
for text, token_ids in zip(_UpperCamelCase , _UpperCamelCase):
self.assertEqual(tokenizer.decode_fast(_UpperCamelCase) , _UpperCamelCase)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Dict = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
_lowerCamelCase : List[str] = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=_UpperCamelCase , )
| 705 | from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( __A , __A ):
'''simple docstring'''
if row >= len(__A ):
solution.append(__A )
printboard(__A )
print()
return True
for i in range(len(__A ) ):
if is_safe(__A , __A , __A ):
_lowerCamelCase : int = 1
solve(__A , row + 1 )
_lowerCamelCase : List[str] = 0
return False
def A__ ( __A ):
'''simple docstring'''
for i in range(len(__A ) ):
for j in range(len(__A ) ):
if board[i][j] == 1:
print("""Q""" , end=""" """ )
else:
print(""".""" , end=""" """ )
print()
# n=int(input("The no. of queens"))
lowerCAmelCase : int =8
lowerCAmelCase : Union[str, Any] =[[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : List[Any] , _UpperCamelCase : int=7 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : Optional[int]=18 , _UpperCamelCase : Any=30 , _UpperCamelCase : Optional[Any]=400 , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=True , _UpperCamelCase : str=[0.5, 0.5, 0.5] , _UpperCamelCase : int=[0.5, 0.5, 0.5] , ) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Any = size if size is not None else {"""height""": 18, """width""": 18}
_lowerCamelCase : Dict = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : Dict = image_size
_lowerCamelCase : List[Any] = min_resolution
_lowerCamelCase : Tuple = max_resolution
_lowerCamelCase : int = do_resize
_lowerCamelCase : Tuple = size
_lowerCamelCase : Union[str, Any] = do_normalize
_lowerCamelCase : Dict = image_mean
_lowerCamelCase : str = image_std
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = DPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = DPTImageProcessingTester(self)
@property
def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
"""simple docstring"""
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCamelCase , """image_mean"""))
self.assertTrue(hasattr(_UpperCamelCase , """image_std"""))
self.assertTrue(hasattr(_UpperCamelCase , """do_normalize"""))
self.assertTrue(hasattr(_UpperCamelCase , """do_resize"""))
self.assertTrue(hasattr(_UpperCamelCase , """size"""))
def _SCREAMING_SNAKE_CASE ( self : int) ->Dict:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image)
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_lowerCamelCase : Optional[int] = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray)
# Test not batched input
_lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_lowerCamelCase : List[str] = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor)
# Test not batched input
_lowerCamelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_lowerCamelCase : str = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 706 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ={
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A__ ( __A , __A , __A , __A=None ):
'''simple docstring'''
# Initialise PyTorch model
_lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A )
_lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
_lowerCamelCase : int = finetuning_task
_lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCamelCase : int = XLNetForSequenceClassification(__A )
elif "squad" in finetuning_task:
_lowerCamelCase : Dict = finetuning_task
_lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A )
else:
_lowerCamelCase : Any = XLNetLMHeadModel(__A )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__A , __A , __A )
# Save pytorch-model
_lowerCamelCase : Optional[Any] = os.path.join(__A , __A )
_lowerCamelCase : Any = os.path.join(__A , __A )
print(F"""Save PyTorch model to {os.path.abspath(__A )}""" )
torch.save(model.state_dict() , __A )
print(F"""Save configuration file to {os.path.abspath(__A )}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowerCAmelCase : Union[str, Any] =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
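# Example invocation (illustrative paths; the script filename is an assumption):
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet_pytorch \
#     --finetuning_task sts-b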
| 15 | 0 |
from math import factorial
def A__ ( __A , __A ):
    # If either of the conditions is true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(__A ) // (factorial(__A ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
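# Sanity check (assumption: Python >= 3.8, where math.comb is available):
# from math import comb
# assert comb(52, 5) == 2_598_960
# assert comb(40, 4) == 91_390
# assert comb(10, 3) == 120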
| 707 | def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
for ch in input_str:
_lowerCamelCase : Optional[Any] = ord(__A )
_lowerCamelCase : List[str] = pow(2 , __A )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
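# Equivalent compact restatement (illustrative) of the bitmap check above:
# def has_unique_chars(s: str) -> bool:
#     seen = 0
#     for ch in s:
#         bit = 1 << ord(ch)  # one bit per code point
#         if seen & bit:
#             return False
#         seen |= bit
#     return True
# assert has_unique_chars("abc") and not has_unique_chars("aab")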
| 15 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowerCAmelCase : int ="\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
lowerCAmelCase : Optional[Any] ="\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
lowerCAmelCase : str ="\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")},
"""references""": {
"""id""": datasets.Value("""string"""),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string"""),
"""answer_start""": datasets.Value("""int32"""),
}),
},
}) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
_lowerCamelCase : str = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
_lowerCamelCase : List[Any] = evaluate(dataset=_UpperCamelCase , predictions=_UpperCamelCase)
return score
| 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
_lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : str = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
| 15 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
lowerCAmelCase : str ="\nimport os\n"
lowerCAmelCase : int ="\ndef foo():\n import os\n return False\n"
lowerCAmelCase : Optional[int] ="\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
lowerCAmelCase : List[Any] ="\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
lowerCAmelCase : Dict ="\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
lowerCAmelCase : Union[str, Any] ="\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
lowerCAmelCase : Any ="\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
lowerCAmelCase : Tuple ="\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
lowerCAmelCase : Optional[Any] ="\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
lowerCAmelCase : Optional[Any] ="\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
lowerCAmelCase : Optional[Any] =[
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , __A )
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = os.path.join(__A , """test_file.py""" )
with open(__A , """w""" ) as _tmp_file:
_tmp_file.write(__A )
_lowerCamelCase : Optional[int] = get_imports(__A )
assert parsed_imports == ["os"]
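    # All cases reduce to ["os"]: `get_imports` skips imports that sit inside a
    # try/except block (the `bar`/`baz` imports above), keeping only the
    # unconditional `import os`.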
| 709 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['pixel_values']
def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256}
_lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = do_resize
_lowerCamelCase : int = size
_lowerCamelCase : Optional[int] = resample
_lowerCamelCase : int = do_center_crop
_lowerCamelCase : Optional[Any] = crop_size
_lowerCamelCase : Union[str, Any] = do_rescale
_lowerCamelCase : List[str] = rescale_factor
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return resize(
_UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str:
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image:
"""simple docstring"""
_lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[str] = resample if resample is not None else self.resample
_lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : Optional[Any] = size if size is not None else self.size
_lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = make_list_of_images(_UpperCamelCase)
if not valid_images(_UpperCamelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images]
if do_resize:
_lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images]
if do_center_crop:
_lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images]
if do_rescale:
_lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images]
if do_normalize:
_lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images]
_lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images]
_lowerCamelCase : int = {"""pixel_values""": images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
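# With the defaults above, preprocessing applies, in order: resize to 256x256
# (bicubic) -> center-crop to 224x224 -> rescale by 1/255 -> normalize with the
# ImageNet standard mean/std -> convert to channels-first layout.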
| 15 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = DiTPipeline
_snake_case = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
_snake_case = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowerCamelCase : Union[str, Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_UpperCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=_UpperCamelCase , )
_lowerCamelCase : Dict = AutoencoderKL()
_lowerCamelCase : Tuple = DDIMScheduler()
_lowerCamelCase : Any = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : Any = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : Any = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Optional[int] = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Optional[int] = """cpu"""
_lowerCamelCase : List[Any] = self.get_dummy_components()
_lowerCamelCase : List[str] = self.pipeline_class(**_UpperCamelCase)
pipe.to(_UpperCamelCase)
pipe.set_progress_bar_config(disable=_UpperCamelCase)
_lowerCamelCase : Dict = self.get_dummy_inputs(_UpperCamelCase)
_lowerCamelCase : Any = pipe(**_UpperCamelCase).images
_lowerCamelCase : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3))
_lowerCamelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7])
_lowerCamelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_UpperCamelCase , 1E-3)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_UpperCamelCase , expected_max_diff=1E-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@require_torch_gpu
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str) ->int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Tuple = torch.manual_seed(0)
_lowerCamelCase : Dict = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""")
pipe.to("""cuda""")
_lowerCamelCase : List[str] = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCamelCase : Any = pipe.get_label_ids(_UpperCamelCase)
_lowerCamelCase : List[Any] = pipe(_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=40 , output_type="""np""").images
for word, image in zip(_UpperCamelCase , _UpperCamelCase):
_lowerCamelCase : List[str] = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""")
assert np.abs((expected_image - image).max()) < 1E-2
def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""")
_lowerCamelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("""cuda""")
_lowerCamelCase : Tuple = ["""vase""", """umbrella"""]
_lowerCamelCase : Dict = pipe.get_label_ids(_UpperCamelCase)
_lowerCamelCase : List[Any] = torch.manual_seed(0)
_lowerCamelCase : List[Any] = pipe(_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=25 , output_type="""np""").images
for word, image in zip(_UpperCamelCase , _UpperCamelCase):
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"""/dit/{word}_512.npy""")
assert np.abs((expected_image - image).max()) < 1E-1
| 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : float) ->float:
"""simple docstring"""
return 0.0
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_lowerCamelCase : Tuple = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 512
_lowerCamelCase : Tuple = [1] + [0] * (size - 1)
_lowerCamelCase : Optional[Any] = [filter_type.process(__A ) for item in inputs]
_lowerCamelCase : Optional[Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCamelCase : Tuple = np.abs(np.fft.fft(__A ) )
_lowerCamelCase : List[Any] = 20 * np.logaa(__A )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
_lowerCamelCase : Any = get_bounds(__A , __A )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(__A )
plt.show()
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 512
_lowerCamelCase : Union[str, Any] = [1] + [0] * (size - 1)
_lowerCamelCase : int = [filter_type.process(__A ) for item in inputs]
_lowerCamelCase : Optional[Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCamelCase : Any = np.angle(np.fft.fft(__A ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(__A , -2 * pi ) )
plt.show()
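# A minimal concrete filter satisfying the `process` protocol above
# (illustrative only, not part of the original module): a one-pole IIR
# low-pass, y[n] = alpha * x[n] + (1 - alpha) * y[n - 1].
class SimpleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # Exponential smoothing: blend the new sample into the running output.
        self.prev = self.alpha * sample + (1 - self.alpha) * self.prev
        return self.prev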
| 15 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = IFPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
return self._get_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
_lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def A__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model ,
    model_args: tuple ,
    output_path: Path ,
    ordered_input_names ,
    output_names ,
    dynamic_axes ,
    opset ,
    use_external_data_format=False ,
):
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models(model_path: str , output_path: str , opset: int , fp16: bool = False ):
    '''simple docstring'''
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        device = """cpu"""
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + """/vae""" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
            """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
        } , opset=opset , )
    del vae_decoder
del vae_decoder
if __name__ == "__main__":
lowerCAmelCase : Optional[int] =argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
lowerCAmelCase : Optional[Any] =parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 15 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8 ) -> str:
    '''simple docstring'''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl: str , i: int ) -> str:
    '''simple docstring'''
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random(ctbi: str , i: int ) -> str:
    '''simple docstring'''
    return "".join(secrets.choice(ctbi ) for _ in range(i ) )
# Minimal implementations of the original `pass` stubs; the names and
# signatures are inferred from the commented-out call above.
def random_number(ctbi: str , i: int ) -> str:
    '''simple docstring'''
    return "".join(secrets.choice(ctbi ) for _ in range(i ) )
def random_letters(ctbi: str , i: int ) -> str:
    '''simple docstring'''
    return "".join(secrets.choice(ctbi ) for _ in range(i ) )
def random_characters(ctbi: str , i: int ) -> str:
    '''simple docstring'''
    return "".join(secrets.choice(ctbi ) for _ in range(i ) )
def is_strong_password(password: str , min_length: int = 8 ) -> bool:
    '''simple docstring'''
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters
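# Quick self-checks of the helpers above (added for illustration; the sample
# passwords and the length of 12 are arbitrary examples).
assert is_strong_password("""Th1s_is_strong!""" )
assert not is_strong_password("""weakpass""" )
assert len(password_generator(12 ) ) == 12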
def main() -> None:
    '''simple docstring'''
    length = int(input("""Please indicate the max length of your password: """ ).strip() )
    chars_incl = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(length ) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(chars_incl , length ) , )
    print("""[If you are thinking of using this password, You better save it.]""" )
if __name__ == "__main__":
main()
| 712 | from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300 # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
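# Worked example (added for illustration): for silicon at T = 300 K with typical
# textbook doping N_d = N_a = 1e17 cm^-3 and n_i = 1.5e10 cm^-3, the built-in
# voltage (kT/q) * ln(N_d * N_a / n_i^2) comes out to roughly 0.81 V; the doping
# values are arbitrary examples, not part of the original module.
assert 0.80 < builtin_voltage(1e17 , 1e17 , 1.5e10 ) < 0.82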
| 15 | 0 |
from __future__ import annotations
def longest_subsequence(array: list[int] ) -> list[int]:  # This function is recursive
    '''simple docstring'''
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq : list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
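# Example (added for illustration; the input list is an arbitrary sample):
# the function returns one longest non-decreasing subsequence of its input.
assert longest_subsequence([1, 5, 2, 3, 4] ) == [1, 2, 3, 4]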
| 713 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example ):
    '''simple docstring'''
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
parser =HfArgumentParser(PretokenizationArguments)
args =parser.parse_args()
if args.num_workers is None:
    args.num_workers =multiprocessing.cpu_count()
tokenizer =AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start =time.time()
ds =load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
t_start =time.time()
ds =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 0 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float] , nums2: list[float] ) -> float:
    '''simple docstring'''
    all_numbers = sorted(nums1 + nums2 )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
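# Quick self-checks (added for illustration; the arrays are arbitrary examples).
assert median_of_two_arrays([1, 3] , [2] ) == 2
assert median_of_two_arrays([1, 2] , [3, 4] ) == 2.5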
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a =[float(x) for x in input("Enter the elements of first array: ").split()]
    array_b =[float(x) for x in input("Enter the elements of second array: ").split()]
    print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
| 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = IFPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
return self._get_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
_lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def A__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 15 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
    '''simple docstring'''
    destination_vertex: int
    weight: int
class AdjacencyList :
'''simple docstring'''
    def __init__( self : Tuple , size : int) ->None:
        """simple docstring"""
        self._graph : list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__( self : int , vertex : int) ->Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex])
    @property
    def size( self : Union[str, Any]) ->int:
        """simple docstring"""
        return self._size
    def add_edge( self : List[str] , from_vertex : int , to_vertex : int , weight : int) ->None:
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex , weight))
    def get_shortest_path( self : Optional[int] , start_vertex : int , finish_vertex : int) ->int | None:
        """simple docstring"""
        queue = deque([start_vertex])
        distances : list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
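# A small usage sketch (added for illustration; the graph below is an arbitrary
# example): 0 --1--> 1 --0--> 2 plus a direct 0 --1--> 2; the 0-1 BFS finds the
# shortest cost of 1 by taking the zero-weight edge for free.
example_graph = AdjacencyList(3 )
example_graph.add_edge(0 , 1 , 1 )
example_graph.add_edge(1 , 2 , 0 )
example_graph.add_edge(0 , 2 , 1 )
assert example_graph.get_shortest_path(0 , 2 ) == 1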
| 715 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
    def __init__( self : Optional[int] , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) ->Tuple:
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
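# Sanity note (added for illustration): with the defaults above, embed_dim=96
# and depths=[2, 2, 6, 2] (four stages) yield hidden_size = int(96 * 2 ** 3) = 768,
# which matches microsoft/swin-tiny-patch4-window7-224.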
| 15 | 0 |
class RadixNode :
    '''simple docstring'''
    def __init__( self : Optional[int] , prefix : str = "" , is_leaf : bool = False) ->None:
        """simple docstring"""
        self.nodes : dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self : List[str] , word : str) ->tuple[str, str, str]:
        """simple docstring"""
        x = 0
        for q, w in zip(self.prefix , word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self : int , words : list[str]) ->None:
        """simple docstring"""
        for word in words:
            self.insert(word)
    def insert( self : Tuple , word : str) ->None:
        """simple docstring"""
        # Case 1: The word is a prefix of the node
        # Solution: We set the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find( self : Optional[Any] , word : str) ->bool:
        """simple docstring"""
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete( self : Optional[Any] , word : str) ->bool:
        """simple docstring"""
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
        return True
    def print_tree( self : Dict , height : int = 0) ->None:
        """simple docstring"""
        if self.prefix != "":
            print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    '''simple docstring'''
    words = """banana bananas bandana band apple all beast""".split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find("""bandanas""" )
    assert not root.find("""apps""" )
    root.delete("""all""" )
    assert not root.find("""all""" )
    root.delete("""banana""" )
    assert not root.find("""banana""" )
    assert root.find("""bananas""" )
    return True
def pytests() -> None:
    '''simple docstring'''
    assert test_trie()
def main() -> None:
    '''simple docstring'''
    root = RadixNode()
    words = """banana bananas bandanas bandana band apple all beast""".split()
    root.insert_many(words )
    print("""Words:""" , words )
    print("""Tree:""" )
    root.print_tree()
if __name__ == "__main__":
main()
| 716 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
_snake_case = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : Any = torch.manual_seed(0)
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 15 | 0 |
def or_gate(input_a: int , input_b: int ) -> int:
    '''simple docstring'''
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate() -> None:
    '''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 717 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ={
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : List[Any] , vocab_file : str , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs) ->Union[str, Any]:
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="""utf-8""") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self : str) ->Any:
        """simple docstring"""
        return len(self.vocab)
    def get_vocab( self : Any) ->List[Any]:
        """simple docstring"""
        return dict(self.vocab , **self.added_tokens_encoder)
    def _tokenize( self : Optional[int] , text : Union[str, Any]) ->Any:
        """simple docstring"""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
    def _convert_token_to_id( self : Union[str, Any] , token : int) ->Optional[int]:
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token))
    def _convert_id_to_token( self : Optional[int] , index : Optional[Any]) ->Dict:
        """simple docstring"""
        return self.decoder.get(index)
    def save_vocabulary( self : int , save_directory : str , filename_prefix : Optional[str] = None) ->Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        with open(vocab_file , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False) + """\n""")
        return (vocab_file,)
| 15 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE =16
EVAL_BATCH_SIZE =32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
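# Note (added for illustration): with gradient accumulation, the effective batch
# size per optimizer step is batch_size * gradient_accumulation_steps, while GPU
# memory use stays at the per-minibatch level.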
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders =mocked_dataloaders # noqa: F811
def training_function(config , args ):
    '''simple docstring'''
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
# Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
# Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
model.train()
        for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
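                # Under accelerator.accumulate(model), gradients are synchronized and
                # the optimizer/scheduler effectively step only once every
                # `gradient_accumulation_steps` minibatches; on the intermediate steps
                # the calls above only accumulate local gradients. (Comment added for
                # clarity; behavior as documented by the accelerate library.)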
model.eval()
        for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , __A )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=int , default=1 , help="""The number of minibatches to be run before gradients are accumulated.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
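# Example launch (added for illustration; the script filename is an assumption):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16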
| 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""c"""])
self.assertEqual(_UpperCamelCase , [2])
# Out indices set to match out features
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features set to match out indices
_lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features selected from negative indices
_lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [-3, -1])
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""simple docstring"""
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
# Out features must be a list
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
# Out features must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
# Out indices must be a list or tuple
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
# Out features and out indices must be the same length
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
# Out features should match out indices
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
# Out features and out indices should be in order
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : int = BackboneMixin()
_lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
_lowerCamelCase : Tuple = ["""a""", """c"""]
_lowerCamelCase : List[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
_lowerCamelCase : str = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""])
self.assertEqual(backbone.out_indices , [0, 1])
_lowerCamelCase : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 0 |
def sylvester(number: int ) -> int:
    '''simple docstring'''
    assert isinstance(number , int ), F"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = F"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 719 | import math
def is_prime(number: int ) -> bool:
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime(value , factor=1 , **kwargs ):
    '''simple docstring'''
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1 , **kwargs )
    return value
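# Quick self-checks (added for illustration; the inputs are arbitrary examples):
assert next_prime(14 ) == 17
assert next_prime(5 ) == 5
assert next_prime(14 , desc=True ) == 13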
| 15 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowerCAmelCase : str ={
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class TapasConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'tapas'
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.0_2 , layer_norm_eps=1E-1_2 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ) ->Optional[Any]:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
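# Hedged usage sketch: TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
# mirrors the weakly supervised WTQ setup, while the defaults correspond to an
# SQA-style cell-selection-only model.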
| 720 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline( Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs) ->Tuple:
        """simple docstring"""
        super().__init__(*args , **kwargs)
requires_backends(self , """vision""")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters( self , top_k=None) ->Optional[int]:
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs) ->Dict:
        """simple docstring"""
        return super().__call__(images , **kwargs)
    def preprocess( self , image) ->str:
        """simple docstring"""
        image = load_image(image)
        model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        return model_inputs
    def _forward( self , model_inputs) ->List[str]:
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess( self , model_outputs , top_k=5) ->str:
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores , ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1)[0]
            topk = tf.math.top_k(probs , k=top_k)
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
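# Hedged usage sketch via the high-level factory (the image URL is illustrative):
# from transformers import pipeline
# classifier = pipeline("image-classification")
# classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
# -> [{"score": ..., "label": ...}, ...] produced by the postprocess step above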
| 15 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
'''simple docstring'''
    def create_and_test_config_common_properties( self : List[Any]) ->int:
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , """tf_padding"""))
        self.parent.assertTrue(hasattr(config , """depth_multiplier"""))
class MobileNetVaModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.2_5 , min_depth=8 , tf_padding=True , last_hidden_size=1024 , output_stride=32 , hidden_act="relu6" , classifier_dropout_prob=0.1 , initializer_range=0.0_2 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) ->List[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self : Union[str, Any]) ->Optional[int]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self : Dict) ->Dict:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels) ->Optional[int]:
        """simple docstring"""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels) ->Optional[int]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common( self : Dict) ->Tuple:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self : str) ->int:
        """simple docstring"""
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False)
def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""")
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""")
def _SCREAMING_SNAKE_CASE ( self : str) ->Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states) , expected_num_stages)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def A__ ( ):
'''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self : Optional[Any]) ->Optional[int]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""") if is_vision_available() else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4))
| 721 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self : List[str]) ->List[Any]:
        """simple docstring"""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        image_processor_map = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
            json.dump(image_processor_map , fp)
    def get_tokenizer( self : List[Any] , **kwargs) ->Tuple:
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_image_processor( self : Dict , **kwargs) ->List[Any]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
    def tearDown( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self : Optional[int]) ->Any:
        """simple docstring"""
        image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , ViTImageProcessor)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , ViTImageProcessor)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="""np""")
        input_processor = processor(images=image_input , return_tensors="""np""")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = """test"""
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = """test"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decoded_tok = [seq.replace(""" """ , """""") for seq in decoded_tok]
        self.assertListEqual(decoded_tok , decoded_processor)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        char_input = torch.randn(1 , 27 , 38)
        bpe_input = torch.randn(1 , 27 , 5_0257)
        wp_input = torch.randn(1 , 27 , 3_0522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
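# Hedged note on the logit widths in the batch_decode test above: 38 matches the
# character head's vocabulary, 50257 the GPT-2 BPE head, and 30522 the BERT
# wordpiece head that MGP-STR fuses at decode time.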
| 15 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter( formatter_cls , format_type , aliases = None , ):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            F"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                F"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter( unavailable_error , format_type , aliases = None ):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias( format_type ):
    '''simple docstring'''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter( format_type , **format_kwargs ):
    '''simple docstring'''
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            F"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
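# Hedged usage sketch grounded in the registry above: aliases resolve to one formatter,
# e.g. get_formatter("np") and get_formatter("numpy") both return a NumpyFormatter,
# while get_formatter("torch") re-raises the registered error if PyTorch is missing.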
| 700 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
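# Hedged invocation sketch (script name and flags are illustrative):
# python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# which re-runs the training script under xmp.spawn with --tpu_num_cores 8 appended.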
| 15 | 0 |
class Graph:
    '''simple docstring'''
    def __init__( self : int) ->Tuple:
        """simple docstring"""
        self.vertex = {}
    def print_graph( self : str) ->None:
        """simple docstring"""
        print(self.vertex)
        for i in self.vertex:
            print(i , """ -> """ , """ -> """.join([str(j) for j in self.vertex[i]]))
    def add_edge( self : int , from_vertex : int , to_vertex : int) ->None:
        """simple docstring"""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self : str) ->None:
        """simple docstring"""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i , visited)
    def dfs_recursive( self : Union[str, Any] , start_vertex : int , visited : list) ->None:
        """simple docstring"""
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=""" """)
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
def naive_cut_rod_recursive( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revue = float("""-inf""" )
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revue
def top_down_cut_rod( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive( n , prices , max_rev ):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("""-inf""" )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args( n , prices ):
    '''simple docstring'''
    if n < 0:
        msg = F"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            F"""Got n = {n} but length of prices = {len(prices )}"""
        )
        raise ValueError(msg )
def main():
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
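# Hedged worked example of the recurrence max_rev[i] = max_{1 <= j <= i}(prices[j-1] + max_rev[i-j]):
# with prices = [6, 10, 12, 15, 20, 23], max_rev[1] = 6, max_rev[2] = max(6 + 6, 10 + 0) = 12,
# and max_rev[6] = 36 from six unit-length pieces, matching expected_max_revenue in main().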
| 15 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 | from __future__ import annotations
class XORCipher:
    '''simple docstring'''
    def __init__( self : Tuple , key : int = 0) ->None:
        """simple docstring"""
        # private field
        self.__key = key
    def encrypt( self : Any , content : str , key : int) ->list[str]:
        """simple docstring"""
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def decrypt( self : str , content : str , key : int) ->list[str]:
        """simple docstring"""
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def encrypt_string( self : List[str] , content : str , key : int = 0) ->str:
        """simple docstring"""
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def decrypt_string( self : Optional[Any] , content : str , key : int = 0) ->str:
        """simple docstring"""
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def encrypt_file( self : Optional[int] , file : str , key : int = 0) ->bool:
        """simple docstring"""
        assert isinstance(file , str) and isinstance(key , int)
        try:
            with open(file) as fin, open("""encrypt.out""" , """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key))
        except OSError:
            return False
        return True
    def decrypt_file( self : List[Any] , file : str , key : int) ->bool:
        """simple docstring"""
        assert isinstance(file , str) and isinstance(key , int)
        try:
            with open(file) as fin, open("""decrypt.out""" , """w+""") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
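# Hedged note: XOR with the same key is an involution (c ^ k ^ k == c), which is why
# encrypt/decrypt and encrypt_string/decrypt_string above share identical bodies.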
| 15 | 0 |
def text_justification( word , max_width ):
    '''simple docstring'''
    words = word.split()
    def justify(line , width , max_width ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line , width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(""" """.join(line ) + (remaining_spaces + 1) * """ """ )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
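# Hedged worked example, assuming the reconstruction above:
# text_justification("This is an example of text justification.", 16)
# -> ['This    is    an', 'example  of text', 'justification.  ']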
| 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ) ->Union[str, Any]:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self : Optional[Any]) ->Optional[Any]:
        """simple docstring"""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
| 15 | 0 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude , angle , radian_mode = False ):
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces , location , eps = 10**-1 ):
    '''simple docstring'''
    # summation of moments is zero
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
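# Hedged note: in_static_equilibrium only checks that the net moment (the summed 2D
# cross products of position and force) vanishes; a complete static check would also
# require the net force itself to be zero.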
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence ):
    '''simple docstring'''
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low] , sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid] , sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F"""The elements inside the sequence must contains only {colors} values"""
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
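# Hedged trace for [2, 0, 1]: mid=0 sees 2 -> swap with high -> [1, 0, 2]; mid=0 sees 1
# -> mid=1; mid=1 sees 0 -> swap with low -> [0, 1, 2]. A single O(n) pass, in place.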
| 15 | 0 |
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['onnx']
def __init__( self : Optional[Any] , *_UpperCamelCase : Tuple , **_UpperCamelCase : int) ->int:
"""simple docstring"""
requires_backends(self , ["""onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Optional[int]) ->int:
"""simple docstring"""
requires_backends(cls , ["""onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] , *_UpperCamelCase : List[str] , **_UpperCamelCase : List[Any]) ->List[str]:
"""simple docstring"""
requires_backends(cls , ["""onnx"""])
| 705 | from __future__ import annotations
solution = []
def is_safe( board , row , column ):
    '''simple docstring'''
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve( board , row ):
    '''simple docstring'''
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard( board ):
    '''simple docstring'''
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print("""Q""" , end=""" """ )
            else:
                print(""".""" , end=""" """ )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
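# Hedged note: for n = 8 the backtracking above enumerates every placement, so the
# final count printed is the classic 92 solutions to the eight queens puzzle.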
| 15 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCAmelCase : List[str] =[
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    def one_complete_example( self , complete_file_name : str , parser_only : bool , secondary_filename : str = None , special_strings : list = None) ->List[Any]:
        """simple docstring"""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("""examples""" , """by_feature"""))
        examples_path = os.path.abspath("""examples""")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section="""main()""" if parser_only else """training_function()""" , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name) , item_path , secondary_filename , special_strings)
                        diff = """\n""".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , """""")
                        self.assertEqual(diff , """""")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
"""simple docstring"""
        self.one_complete_example("""complete_nlp_example.py""" , True)
        self.one_complete_example("""complete_nlp_example.py""" , False)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
"""simple docstring"""
        cv_path = os.path.abspath(os.path.join("""examples""" , """cv_example.py"""))
        special_strings = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
        self.one_complete_example("""complete_cv_example.py""" , True , cv_path , special_strings)
        self.one_complete_example("""complete_cv_example.py""" , False , cv_path , special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests( TempDirTestCase ):
    '''simple docstring'''
    clear_on_setup = False
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any) ->List[Any]:
"""simple docstring"""
super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , """default_config.yml""")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict) ->Optional[Any]:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
"""simple docstring"""
        testargs = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""")))
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
        testargs = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
_lowerCamelCase : Any = run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""")))
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
"""simple docstring"""
        testargs = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0')}
""".split()
        output = run_command(self._launch_args + testargs , return_stdout=True)
        self.assertNotIn("""epoch 0:""" , output)
        self.assertIn("""epoch 1:""" , output)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
"""simple docstring"""
        testargs = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2')}
""".split()
        output = run_command(self._launch_args + testargs , return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("""epoch 0:""" , output)
            self.assertIn("""epoch 1:""" , output)
        else:
            self.assertIn("""epoch 0:""" , output)
            self.assertIn("""epoch 1:""" , output)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
"""simple docstring"""
        testargs = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""}):
            output = run_command(self._launch_args + testargs , return_stdout=True)
            results = re.findall("""({.+})""" , output)
            results = [r for r in results if """accuracy""" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["""accuracy"""] , 0.7_5)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
"""simple docstring"""
        testargs = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""})
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir , """tracking""")))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
        testargs = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
"""simple docstring"""
        testargs = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs)
| 706 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ={
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else """"""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
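# Hedged invocation sketch (file paths and script name are illustrative):
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ./xlnet_cased/model.ckpt \
#   --xlnet_config_file ./xlnet_cased/config.json \
#   --pytorch_dump_folder_path ./pt_model \
#   --finetuning_task sts-b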
| 15 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : Optional[Any]=32 , _UpperCamelCase : int=2 , _UpperCamelCase : Optional[Any]=3 , _UpperCamelCase : Union[str, Any]=16 , _UpperCamelCase : List[str]=[32, 64, 128] , _UpperCamelCase : Any=[1, 2, 1] , _UpperCamelCase : Tuple=[2, 2, 4] , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=2.0 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : int=True , _UpperCamelCase : Any=0.0_2 , _UpperCamelCase : List[Any]=1E-5 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Any=None , _UpperCamelCase : str=True , _UpperCamelCase : Dict=10 , _UpperCamelCase : Dict=8 , _UpperCamelCase : Any=["stage1", "stage2"] , _UpperCamelCase : Dict=[1, 2] , ) ->str:
"""simple docstring"""
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : List[Any] = batch_size
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : Any = embed_dim
_lowerCamelCase : Union[str, Any] = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : List[Any] = num_heads
_lowerCamelCase : Union[str, Any] = window_size
_lowerCamelCase : Optional[Any] = mlp_ratio
_lowerCamelCase : List[str] = qkv_bias
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Tuple = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = drop_path_rate
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : str = use_absolute_embeddings
_lowerCamelCase : List[str] = patch_norm
_lowerCamelCase : Dict = layer_norm_eps
_lowerCamelCase : int = initializer_range
_lowerCamelCase : str = is_training
_lowerCamelCase : Optional[int] = scope
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : Any = encoder_stride
_lowerCamelCase : List[Any] = out_features
_lowerCamelCase : str = out_indices
def _SCREAMING_SNAKE_CASE ( self : str) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int) ->Dict:
"""simple docstring"""
_lowerCamelCase : List[Any] = FocalNetModel(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Optional[Any] = model(_UpperCamelCase)
_lowerCamelCase : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_lowerCamelCase : int = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = FocalNetBackbone(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Any = model(_UpperCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
_lowerCamelCase : List[Any] = None
_lowerCamelCase : int = FocalNetBackbone(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Tuple = model(_UpperCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = FocalNetForMaskedImageModeling(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Tuple = model(_UpperCamelCase)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_lowerCamelCase : Tuple = 1
_lowerCamelCase : int = FocalNetForMaskedImageModeling(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCamelCase : List[Any] = model(_UpperCamelCase)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : str = self.type_sequence_label_size
_lowerCamelCase : Optional[int] = FocalNetForImageClassification(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Any = model(_UpperCamelCase , labels=_UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowerCamelCase : List[str] = 1
_lowerCamelCase : int = FocalNetForImageClassification(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCamelCase : List[Any] = model(_UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_snake_case = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : int = FocalNetModelTester(self)
_lowerCamelCase : str = ConfigTester(self , config_class=_UpperCamelCase , embed_dim=37 , has_text_modality=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
return
def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""")
def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""")
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = model_class(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))
_lowerCamelCase : Union[str, Any] = outputs.hidden_states
_lowerCamelCase : Optional[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths) + 1)
self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase)
# FocalNet has a different seq_length
_lowerCamelCase : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
_lowerCamelCase : str = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase)
_lowerCamelCase : Any = reshaped_hidden_states[0].shape
_lowerCamelCase : Dict = (
reshaped_hidden_states[0].view(_UpperCamelCase , _UpperCamelCase , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[str] = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Tuple = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : int = 3
_lowerCamelCase : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Dict = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Dict = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = FocalNetModel.from_pretrained(_UpperCamelCase)
self.assertIsNotNone(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->int:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
"""simple docstring"""
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = (FocalNetBackbone,) if is_torch_available() else ()
_snake_case = FocalNetConfig
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = FocalNetModelTester(self)
| 707 | def is_unique(input_str: str) -> bool:
    """Return True if no character occurs more than once in `input_str`."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
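# --- Illustrative checks (added), consistent with the bitmap logic above ---
# is_unique("abcd") -> True   (all characters distinct)
# is_unique("abca") -> False  (the bit for 'a' is already set at the second 'a')
# is_unique("")     -> True   (vacuously unique)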
| 15 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    """Collect the shapes of every tensor in a (possibly nested) dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat (row-major) index back into a multi-dimensional index."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
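# Worked example (added for illustration; not part of the original module):
# _flat_idx_to_idx(5, (2, 3)) == (1, 2), since the row-major flat index of
# (1, 2) in a (2, 3) grid is 1 * 3 + 2 == 5.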
@torch.jit.ignore
def _get_minimal_slice_set(start: List[int], end: List[int], dims: List[int], start_edges: Optional[List[bool]] = None, end_edges: Optional[List[bool]] = None,) -> List[Tuple[slice, ...]]:
'''simple docstring'''
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(__A ) -> None:
_lowerCamelCase : Optional[int] = True
for i in range(len(__A ) ):
_lowerCamelCase : int = -1 * (i + 1)
l[reversed_idx] &= tally
_lowerCamelCase : str = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)
# start == end, and we're done
if divergence_idx == len(dims):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
sdi = start[divergence_idx]
return tuple(
path + (slice(sdi, sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
edi = end[divergence_idx]
return tuple(
path + (slice(edi, edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice the flattened range [flat_start, flat_end) out of t's leading batch dims."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run `layer` over `inputs` in chunks along the flattened leading batch dims."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
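# --- Illustrative usage of chunk_layer (added; the toy layer and shapes are
# --- arbitrary, chosen only to make the contract concrete) ---
#   layer = lambda x: {"y": x * 2}                  # toy layer returning a dict
#   inputs = {"x": torch.randn(2, 8, 3)}            # two leading batch dims
#   out = chunk_layer(layer, inputs, chunk_size=4, no_batch_dims=2)
#   assert torch.allclose(out["y"], inputs["x"] * 2)
# The two leading dims (2, 8) are flattened to 16 rows, processed 4 at a time,
# and the output is reshaped back to (2, 8, 3).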
class ChunkSizeTuner:
    """Finds and caches the largest chunk size that runs without a RuntimeError."""
def __init__( self : List[Any] , _UpperCamelCase : int = 512 , ) ->int:
"""simple docstring"""
_lowerCamelCase : List[str] = max_chunk_size
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
"""simple docstring"""
logging.info("""Tuning chunk size...""")
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_UpperCamelCase : int) -> bool:
try:
with torch.no_grad():
fn(*_UpperCamelCase , chunk_size=_UpperCamelCase)
return True
except RuntimeError:
return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        """Return True if two cached argument trees have identical structure and values."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Pick (and cache) the largest chunk size that fits in memory for these args."""
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
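# --- Illustrative usage of ChunkSizeTuner (added; `my_layer` is a placeholder,
# --- and shapes are arbitrary) ---
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(
#       representative_fn=lambda x, chunk_size: my_layer(x, chunk_size=chunk_size),
#       args=(torch.randn(64, 128),),
#       min_chunk_size=16,
#   )
# The tuner tries power-of-two candidates up to max_chunk_size, treats a
# RuntimeError (e.g. CUDA OOM) as "too large", and caches the result keyed on
# the argument shapes.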
| 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
_lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : str = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
| 15 | 0 |
def is_unique(input_str: str) -> bool:
    """Return True if no character occurs more than once in `input_str`."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['pixel_values']
def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256}
_lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = do_resize
_lowerCamelCase : int = size
_lowerCamelCase : Optional[int] = resample
_lowerCamelCase : int = do_center_crop
_lowerCamelCase : Optional[Any] = crop_size
_lowerCamelCase : Union[str, Any] = do_rescale
_lowerCamelCase : List[str] = rescale_factor
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return resize(
_UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str:
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image:
"""simple docstring"""
_lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[str] = resample if resample is not None else self.resample
_lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : Optional[Any] = size if size is not None else self.size
_lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = make_list_of_images(_UpperCamelCase)
if not valid_images(_UpperCamelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images]
if do_resize:
_lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images]
if do_center_crop:
_lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images]
if do_rescale:
_lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images]
if do_normalize:
_lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images]
_lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images]
_lowerCamelCase : int = {"""pixel_values""": images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
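# --- Illustrative usage of the image processor above (added; "example.jpg" is
# --- a placeholder path) ---
#   from PIL import Image
#   processor = __snake_case()          # defaults: resize to 256x256, then center-crop to 224x224
#   batch = processor(images=Image.open("example.jpg"), return_tensors="pt")
#   batch["pixel_values"].shape         # -> torch.Size([1, 3, 224, 224]) for an RGB input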
| 15 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : str=13 , _UpperCamelCase : Any=7 , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Any=True , _UpperCamelCase : Any=False , _UpperCamelCase : Dict=True , _UpperCamelCase : List[str]=99 , _UpperCamelCase : str=32 , _UpperCamelCase : Optional[Any]=5 , _UpperCamelCase : str=4 , _UpperCamelCase : Optional[Any]=64 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : str=512 , _UpperCamelCase : Tuple=16 , _UpperCamelCase : Any=2 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Tuple=None , _UpperCamelCase : Any=2 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : int=2 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : str=4 , _UpperCamelCase : List[str]=1 , ) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : Optional[int] = seq_length
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Union[str, Any] = use_input_mask
_lowerCamelCase : Tuple = use_token_type_ids
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Dict = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Optional[Any] = type_sequence_label_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Optional[Any] = num_labels
_lowerCamelCase : List[str] = num_choices
_lowerCamelCase : Optional[Any] = scope
_lowerCamelCase : int = q_groups
_lowerCamelCase : Union[str, Any] = k_groups
_lowerCamelCase : List[str] = v_groups
_lowerCamelCase : Union[str, Any] = post_attention_groups
_lowerCamelCase : List[Any] = intermediate_groups
_lowerCamelCase : int = output_groups
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowerCamelCase : int = None
if self.use_input_mask:
_lowerCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCamelCase : int = None
_lowerCamelCase : Dict = None
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.num_choices)
_lowerCamelCase : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SqueezeBertModel(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Tuple = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Dict = model(_UpperCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SqueezeBertForMaskedLM(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : List[str] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : int = SqueezeBertForQuestionAnswering(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Optional[int] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : Optional[int] = SqueezeBertForSequenceClassification(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Optional[int] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : str = SqueezeBertForTokenClassification(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : str = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.num_choices
_lowerCamelCase : Tuple = SqueezeBertForMultipleChoice(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Optional[int] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : Dict = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : Optional[int] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_snake_case = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = True
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""simple docstring"""
_lowerCamelCase : List[str] = SqueezeBertModelTester(self)
_lowerCamelCase : List[str] = ConfigTester(self , config_class=_UpperCamelCase , dim=37)
def _SCREAMING_SNAKE_CASE ( self : str) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : str) ->int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = SqueezeBertModel.from_pretrained(_UpperCamelCase)
self.assertIsNotNone(_UpperCamelCase)
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]; this default stub returns silence."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain of `filter_type`'s impulse response on a log-frequency axis."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase of `filter_type`'s impulse response on a log-frequency axis."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
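# --- Illustrative usage (added; the one-pole low-pass below and its 0.25
# --- coefficient are arbitrary, just to make the demo concrete) ---
#   class SimpleLowPass:
#       """y[n] = 0.25 * x[n] + 0.75 * y[n-1]; satisfies the FilterType protocol."""
#       def __init__(self) -> None:
#           self.prev_output = 0.0
#       def process(self, sample: float) -> float:
#           self.prev_output = 0.25 * sample + 0.75 * self.prev_output
#           return self.prev_output
#
#   show_frequency_response(SimpleLowPass(), samplerate=48000)
#   show_phase_response(SimpleLowPass(), samplerate=48000)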
| 15 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return the Möbius function mu(number): 1, -1, or 0."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
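# --- Illustrative values (added), following the definition above ---
# mobius(1)  == 1    (empty factorization; zero primes, an even count)
# mobius(2)  == -1   (one prime factor)
# mobius(4)  == 0    (not square-free: 2 * 2)
# mobius(6)  == 1    (two distinct primes: 2 * 3)
# mobius(30) == -1   (three distinct primes: 2 * 3 * 5)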
| 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
args = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 15 | 0 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal leaf value reachable from `node_index` at `depth`."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 712 |
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( __A , __A , __A , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
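# Illustrative call (added; dopant levels are assumptions typical of a silicon
# p-n junction): with donor_conc = acceptor_conc = 1e17 and
# intrinsic_conc = 1.5e10 (all in cm^-3), kT/q ~ 0.02585 V at T = 300 K, so
# the built-in voltage is roughly 0.02585 * ln(1e34 / 2.25e20) ~ 0.81 V.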
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCAmelCase : Dict =None
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase : Optional[int] ={
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
lowerCAmelCase : List[str] ={
"google/rembert": 256,
}
lowerCAmelCase : Dict ="▁"
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = RemBertTokenizer
def __init__( self : List[str] , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : str=None , _UpperCamelCase : Dict=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="[CLS]" , _UpperCamelCase : Optional[int]="[SEP]" , _UpperCamelCase : List[str]="<unk>" , _UpperCamelCase : Optional[int]="[SEP]" , _UpperCamelCase : List[Any]="<pad>" , _UpperCamelCase : str="[CLS]" , _UpperCamelCase : Dict="[MASK]" , **_UpperCamelCase : int , ) ->int:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else mask_token
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : Tuple = do_lower_case
_lowerCamelCase : Tuple = remove_space
_lowerCamelCase : Union[str, Any] = keep_accents
_lowerCamelCase : Union[str, Any] = vocab_file
_lowerCamelCase : Tuple = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
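    # Added note: the layout produced above is [CLS] A [SEP] for a single
    # sequence and [CLS] A [SEP] B [SEP] for a pair, i.e. the standard
    # BERT-style input format.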
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase)) + [1] + ([0] * len(_UpperCamelCase)) + [1]
return [1] + ([0] * len(_UpperCamelCase)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
_lowerCamelCase : Dict = [self.sep_token_id]
_lowerCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase):
logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase))
return
_lowerCamelCase : List[str] = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCamelCase):
copyfile(self.vocab_file , _UpperCamelCase)
return (out_vocab_file,)
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
_lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""]
_lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
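# Example of the mapping applied per record (added; the exact name of the
# ratio field follows the presumed original script): the returned dict carries
# "input_ids" plus a characters-per-token ratio, while every raw column is
# dropped later via `remove_columns`, so only the tokenized data is pushed to
# the Hub.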
lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments)
lowerCAmelCase : int =parser.parse_args()
if args.num_workers is None:
lowerCAmelCase : Any =multiprocessing.cpu_count()
lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowerCAmelCase : str =time.time()
lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
lowerCAmelCase : Dict =time.time()
lowerCAmelCase : Dict =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
lowerCAmelCase : Tuple =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
        _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
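    # Added note: the helper above draws a random (3, 30, 400) uint8 array,
    # then np.moveaxis converts the channel-first layout to the (H, W, C)
    # layout that PIL's Image.fromarray expects.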
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 714 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = IFPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
return self._get_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
        super().test_save_load_float16(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
        _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16)
        _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def A__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
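# Added note: clearing the cache and resetting the peak-memory counters before
# each stage is what lets the `torch.cuda.max_memory_allocated()` assertions
# above measure one pipeline run at a time.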
| 15 | 0 |
def A__ ( __A , __A ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_lowerCamelCase : str = str(bin(__A ) )[2:] # remove the leading "0b"
_lowerCamelCase : Dict = str(bin(__A ) )[2:] # remove the leading "0b"
_lowerCamelCase : Optional[Any] = max(len(__A ) , len(__A ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__A ) , b_binary.zfill(__A ) ) )
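# Worked example (added; `binary_xor` is the presumed original name of the
# function above): 25 -> 0b11001 and 32 -> 0b100000; zero-filled to equal
# width and compared bitwise, binary_xor(25, 32) == "0b111001" (decimal 57).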
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
_lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
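# Minimal usage sketch (added; `SwinConfig` is the presumed original class
# name, and the values shown are simply the defaults above):
#
#   config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#   # hidden_size after the last stage: 96 * 2 ** (4 - 1) == 768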
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
| 15 | 0 |
import os
import string
import sys
lowerCAmelCase : Optional[int] =1 << 8
lowerCAmelCase : int ={
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
lowerCAmelCase : str =KEYMAP["up"]
lowerCAmelCase : str =KEYMAP["left"]
if sys.platform == "win32":
lowerCAmelCase : str =[]
lowerCAmelCase : Tuple ={
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCAmelCase : str =ord(str(i))
def A__ ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
_lowerCamelCase : Union[str, Any] = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__A ) == 0:
# Read the keystroke
_lowerCamelCase : Any = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : List[str] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Dict = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__A )
if ord(__A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : Any = chr(KEYMAP["""esc"""] )
except KeyError:
_lowerCamelCase : List[Any] = cha[1]
else:
_lowerCamelCase : List[Any] = ch.decode(__A )
else:
_lowerCamelCase : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : List[str] = sys.stdin.fileno()
_lowerCamelCase : Union[str, Any] = termios.tcgetattr(__A )
try:
tty.setraw(__A )
_lowerCamelCase : str = sys.stdin.read(1 )
finally:
termios.tcsetattr(__A , termios.TCSADRAIN , __A )
return ch
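# Added note: on POSIX the terminal is put into raw mode only for the single
# one-byte read, and the saved termios attributes are restored in the
# `finally` block, so echo and line buffering come back even if the read fails.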
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = get_raw_chars()
if ord(__A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__A ) == KEYMAP["esc"]:
_lowerCamelCase : int = get_raw_chars()
if ord(__A ) == KEYMAP["mod_int"]:
_lowerCamelCase : Optional[Any] = get_raw_chars()
if ord(__A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 716 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
_snake_case = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : Any = torch.manual_seed(0)
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 15 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['image_processor', 'tokenizer']
_snake_case = 'LayoutLMv3ImageProcessor'
_snake_case = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Dict , _UpperCamelCase : Tuple=None , _UpperCamelCase : Optional[Any]=None , **_UpperCamelCase : Tuple) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _UpperCamelCase , )
_lowerCamelCase : List[Any] = kwargs.pop("""feature_extractor""")
_lowerCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(_UpperCamelCase , _UpperCamelCase)
def __call__( self : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCamelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCamelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCamelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Union[bool, str, PaddingStrategy] = False , _UpperCamelCase : Union[bool, str, TruncationStrategy] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Union[str, Any] , ) ->BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""")
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""")
# first, apply the image processor
_lowerCamelCase : str = self.image_processor(images=_UpperCamelCase , return_tensors=_UpperCamelCase)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCamelCase , _UpperCamelCase):
_lowerCamelCase : List[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
_lowerCamelCase : Union[str, Any] = features["""words"""]
_lowerCamelCase : Optional[Any] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel values
_lowerCamelCase : List[Any] = features.pop("""pixel_values""")
if return_overflowing_tokens is True:
_lowerCamelCase : List[str] = self.get_overflowing_images(_UpperCamelCase , encoded_inputs["""overflow_to_sample_mapping"""])
_lowerCamelCase : int = images
return encoded_inputs
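    # Minimal usage sketch (added; the checkpoint id is an assumption):
    #
    #   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    #   encoding = processor(images=image, return_tensors="pt")
    #   # with apply_ocr=True the image processor supplies the words and boxes,
    #   # so only the image has to be passed in.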
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(_UpperCamelCase) != len(_UpperCamelCase):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F""" {len(_UpperCamelCase)} and {len(_UpperCamelCase)}""")
return images_with_overflow
def _SCREAMING_SNAKE_CASE ( self : List[str] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[Any]) ->str:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *_UpperCamelCase : int , **_UpperCamelCase : Any) ->int:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _UpperCamelCase , )
return self.image_processor_class
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _UpperCamelCase , )
return self.image_processor
| 717 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ={
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
"""simple docstring"""
return len(self.vocab)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = []
for s in text:
char_tokens.extend(_UpperCamelCase)
return char_tokens
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]:
"""simple docstring"""
return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase):
logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase))
return
_lowerCamelCase : Tuple = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
return (vocab_file,)
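# Added note: this tokenizer is character-level -- text is split into single
# characters and each is looked up in vocab.json ([GO], [s], digits and
# lowercase letters), which is why the pretrained positional size is only 27.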
| 15 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCAmelCase : Optional[int] =logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCAmelCase : Dict ="cuda" if torch.cuda.is_available() else "cpu"
def A__ ( __A , __A=100 , __A=" " ):
'''simple docstring'''
_lowerCamelCase : Any = text.split(__A )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__A ) , __A )]
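# Illustrative behavior (added): with n=3,
#   split_text("a b c d e f g", n=3) -> ["a b c", "d e f", "g"]
# i.e. the text is chunked into groups of at most n whitespace-separated words.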
def A__ ( __A ):
'''simple docstring'''
    _lowerCamelCase , _lowerCamelCase : Optional[Any] = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(__A ):
titles.append(title if title is not None else """""" )
texts.append(__A )
return {"title": titles, "text": texts}
def A__ ( __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Any = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=__A , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
_lowerCamelCase : Optional[Any] = ctx_encoder(input_ids.to(device=__A ) , return_dict=__A ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def A__ ( __A , __A , __A , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_lowerCamelCase : str = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_lowerCamelCase : Dict = dataset.map(__A , batched=__A , num_proc=processing_args.num_proc )
# And compute the embeddings
_lowerCamelCase : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__A )
_lowerCamelCase : List[str] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_lowerCamelCase : List[str] = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
_lowerCamelCase : Dict = dataset.map(
partial(__A , ctx_encoder=__A , ctx_tokenizer=__A ) , batched=__A , batch_size=processing_args.batch_size , features=__A , )
# And finally save your dataset
_lowerCamelCase : Optional[int] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(__A )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_lowerCamelCase : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=__A )
# And save the index
_lowerCamelCase : Dict = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(__A )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
_snake_case = field(
default=__lowerCAmelCase , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
_snake_case = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
_snake_case = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
_snake_case = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = field(
default=__lowerCAmelCase , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
_snake_case = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
_snake_case = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCAmelCase : List[Any] =HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCAmelCase : Dict =parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase : List[str] =rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 718 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""c"""])
self.assertEqual(_UpperCamelCase , [2])
# Out indices set to match out features
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features set to match out indices
_lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features selected from negative indices
_lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [-3, -1])
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""simple docstring"""
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
# Out features must be a list
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
# Out features must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
# Out indices must be a list or tuple
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
# Out features and out indices must be the same length
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
# Out features should match out indices
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
# Out features and out indices should be in order
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : int = BackboneMixin()
_lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
_lowerCamelCase : Tuple = ["""a""", """c"""]
_lowerCamelCase : List[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
_lowerCamelCase : str = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""])
self.assertEqual(backbone.out_indices , [0, 1])
_lowerCamelCase : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 0 |
from collections.abc import Callable
import numpy as np
def A__ ( ode_func , ya , xa , step_size , x_end ):
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # Euler predictor step
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # trapezoidal corrector (Heun's / modified Euler method)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
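# Hedged usage sketch for the solver above: for dy/dx = y with y(0) = 1 the exact
# solution at x = 1 is e, so the last entry should be close to 2.718.
# A__(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]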
| 719 |
import math
def is_prime( number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    # trial division by odd candidates up to sqrt(number)
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime( value , factor=1 , **kwargs ):
    '''simple docstring'''
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
| 15 | 0 |
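# Hedged usage sketch for the helpers above:
# is_prime(13)   -> True
# next_prime(14) -> 17  (a prime input returns the next prime after it)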
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : List[Any]=13 , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : str=4 , _UpperCamelCase : Optional[int]=[10, 20, 30, 40] , _UpperCamelCase : int=[2, 2, 3, 2] , _UpperCamelCase : List[str]=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : List[Any]=37 , _UpperCamelCase : Optional[Any]="gelu" , _UpperCamelCase : int=10 , _UpperCamelCase : Any=0.0_2 , _UpperCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , _UpperCamelCase : str=[2, 3, 4] , _UpperCamelCase : Optional[Any]=None , ) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : List[Any] = batch_size
_lowerCamelCase : str = image_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : Optional[Any] = num_stages
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Optional[Any] = depths
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = out_features
_lowerCamelCase : Any = out_indices
_lowerCamelCase : Dict = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
"""simple docstring"""
_lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCamelCase : Dict = None
if self.use_labels:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels)
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ConvNextVaModel(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : List[str] = model(_UpperCamelCase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = ConvNextVaForImageClassification(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Dict = model(_UpperCamelCase , labels=_UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : str) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ConvNextVaBackbone(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Union[str, Any] = model(_UpperCamelCase)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : List[str] = ConvNextVaBackbone(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowerCamelCase : Optional[Any] = model(_UpperCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCamelCase : Optional[Any] = config_and_inputs
_lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase : Tuple = config_and_inputs
_lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_snake_case = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : str = ConvNextVaModelTester(self)
_lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""")
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""")
def _SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCamelCase : int = True
if model_class.__name__ in [
*get_values(_UpperCamelCase),
*get_values(_UpperCamelCase),
]:
continue
_lowerCamelCase : Dict = model_class(_UpperCamelCase)
model.to(_UpperCamelCase)
model.train()
_lowerCamelCase : int = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase)
_lowerCamelCase : List[str] = model(**_UpperCamelCase).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : int = True
if (
model_class.__name__
in [*get_values(_UpperCamelCase), *get_values(_UpperCamelCase)]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCamelCase : List[Any] = model_class(_UpperCamelCase)
model.to(_UpperCamelCase)
model.gradient_checkpointing_enable()
model.train()
_lowerCamelCase : str = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = model(**_UpperCamelCase).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(_UpperCamelCase)
_lowerCamelCase : Any = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]
_lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
"""simple docstring"""
def check_hidden_states_output(_UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict):
_lowerCamelCase : Optional[Any] = model_class(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))
_lowerCamelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase) , expected_num_stages + 1)
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[int] = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase)
@slow
def _SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = ConvNextVaModel.from_pretrained(_UpperCamelCase)
self.assertIsNotNone(_UpperCamelCase)
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""").to(_UpperCamelCase)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Optional[int] = preprocessor(images=_UpperCamelCase , return_tensors="""pt""").to(_UpperCamelCase)
# forward pass
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**_UpperCamelCase)
# verify the logits
_lowerCamelCase : str = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , _UpperCamelCase)
_lowerCamelCase : str = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6]).to(_UpperCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4))
| 720 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple:
"""simple docstring"""
super().__init__(*_UpperCamelCase , **_UpperCamelCase)
requires_backends(self , """vision""")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {}
if top_k is not None:
_lowerCamelCase : str = top_k
return {}, {}, postprocess_params
def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict:
"""simple docstring"""
return super().__call__(_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Tuple = load_image(_UpperCamelCase)
_lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Any = self.model(**_UpperCamelCase)
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str:
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowerCamelCase : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
_lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0]
_lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase)
elif self.framework == "tf":
_lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0]
_lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase)
_lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
_lowerCamelCase : str = scores.tolist()
_lowerCamelCase : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
| 15 | 0 |
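# Hedged usage sketch for the pipeline above (the task name is an assumption based
# on how transformers registers image-classification pipelines):
# from transformers import pipeline
# classifier = pipeline("image-classification")
# classifier("path/to/image.png", top_k=3)  # -> [{"score": ..., "label": ...}, ...]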
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def A__ ( __A ):
'''simple docstring'''
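    # Build static per-residue lookup tables translating between the dense 14-atom
    # layout and the full 37-atom layout, then gather them for this protein's aatype.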
_lowerCamelCase : Dict = []
_lowerCamelCase : List[Any] = []
_lowerCamelCase : Any = []
for rt in rc.restypes:
_lowerCamelCase : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
_lowerCamelCase : List[Any] = {name: i for i, name in enumerate(__A )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
_lowerCamelCase : List[str] = torch.tensor(
__A , dtype=torch.intaa , device=protein["""aatype"""].device , )
_lowerCamelCase : Optional[int] = torch.tensor(
__A , dtype=torch.intaa , device=protein["""aatype"""].device , )
_lowerCamelCase : Optional[int] = torch.tensor(
__A , dtype=torch.floataa , device=protein["""aatype"""].device , )
_lowerCamelCase : Union[str, Any] = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_lowerCamelCase : Tuple = restype_atomaa_to_atomaa[protein_aatype]
_lowerCamelCase : List[Any] = restype_atomaa_mask[protein_aatype]
_lowerCamelCase : Optional[Any] = residx_atomaa_mask
_lowerCamelCase : List[str] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_lowerCamelCase : Dict = restype_atomaa_to_atomaa[protein_aatype]
_lowerCamelCase : Any = residx_atomaa_to_atomaa.long()
# create the corresponding mask
_lowerCamelCase : List[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
_lowerCamelCase : Optional[Any] = rc.restype_atoa[restype_letter]
_lowerCamelCase : int = rc.residue_atoms[restype_name]
for atom_name in atom_names:
_lowerCamelCase : Tuple = rc.atom_order[atom_name]
_lowerCamelCase : Any = 1
_lowerCamelCase : str = restype_atomaa_mask[protein_aatype]
_lowerCamelCase : List[str] = residx_atomaa_mask
return protein
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Any = tree_map(lambda __A : torch.tensor(__A , device=batch["""aatype"""].device ) , __A , np.ndarray )
_lowerCamelCase : Optional[Any] = tensor_tree_map(lambda __A : np.array(__A ) , make_atomaa_masks(__A ) )
return out
| 721 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def A__ ( __A , __A=7 ):
'''simple docstring'''
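    # Query the GitHub Actions API for the most recent scheduled runs of the daily
    # CI workflow on the main branch (PR-triggered runs are excluded).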
_lowerCamelCase : List[Any] = None
if token is not None:
_lowerCamelCase : Tuple = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
_lowerCamelCase : int = """636036"""
_lowerCamelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
_lowerCamelCase : List[Any] = requests.get(__A , headers=__A ).json()
return result["workflow_runs"]
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = get_daily_ci_runs(__A )
_lowerCamelCase : Optional[Any] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_lowerCamelCase : str = workflow_run["""id"""]
break
return workflow_run_id
def A__ ( __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : str = get_last_daily_ci_runs(__A )
if workflow_run_id is not None:
_lowerCamelCase : Tuple = get_artifacts_links(worflow_run_id=__A , token=__A )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_lowerCamelCase : Optional[Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=__A , artifact_url=__A , output_dir=__A , token=__A )
def A__ ( __A , __A , __A ):
'''simple docstring'''
get_last_daily_ci_artifacts(__A , __A , __A )
_lowerCamelCase : List[str] = {}
for artifact_name in artifact_names:
_lowerCamelCase : str = os.path.join(__A , F"""{artifact_name}.zip""" )
if os.path.isfile(__A ):
_lowerCamelCase : Tuple = {}
with zipfile.ZipFile(__A ) as z:
for filename in z.namelist():
if not os.path.isdir(__A ):
# read the file
with z.open(__A ) as f:
_lowerCamelCase : Any = f.read().decode("""UTF-8""" )
return results
| 700 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__A , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__A )
return parser.parse_args()
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = parse_args()
# Import training_script as a module.
_lowerCamelCase : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCamelCase : Optional[Any] = script_fpath.stem
_lowerCamelCase : Dict = importlib.import_module(__A )
# Patch sys.argv
_lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 15 | 0 |
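# Hedged usage sketch for the launcher above (the file name is an assumption; in the
# transformers repo this helper ships as xla_spawn.py):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased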
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case : Tuple = ['image_processor', 'tokenizer']
_snake_case : Union[str, Any] = 'BlipImageProcessor'
_snake_case : Optional[Any] = 'AutoTokenizer'
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple) ->str:
"""simple docstring"""
super().__init__(_UpperCamelCase , _UpperCamelCase)
# add QFormer tokenizer
_lowerCamelCase : Dict = qformer_tokenizer
def __call__( self : Union[str, Any] , _UpperCamelCase : ImageInput = None , _UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Union[bool, str, PaddingStrategy] = False , _UpperCamelCase : Union[bool, str, TruncationStrategy] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Dict , ) ->BatchFeature:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""")
_lowerCamelCase : Tuple = BatchFeature()
if text is not None:
_lowerCamelCase : Union[str, Any] = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
encoding.update(_UpperCamelCase)
_lowerCamelCase : List[str] = self.qformer_tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : str = qformer_text_encoding.pop("""input_ids""")
_lowerCamelCase : Any = qformer_text_encoding.pop("""attention_mask""")
if images is not None:
_lowerCamelCase : str = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase)
encoding.update(_UpperCamelCase)
return encoding
def _SCREAMING_SNAKE_CASE ( self : int , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Optional[Any]) ->List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple , *_UpperCamelCase : List[str] , **_UpperCamelCase : Any) ->List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : int = self.tokenizer.model_input_names
_lowerCamelCase : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , **_UpperCamelCase : Union[str, Any]) ->str:
"""simple docstring"""
if os.path.isfile(_UpperCamelCase):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase)
_lowerCamelCase : str = os.path.join(_UpperCamelCase , """qformer_tokenizer""")
self.qformer_tokenizer.save_pretrained(_UpperCamelCase)
return super().save_pretrained(_UpperCamelCase , **_UpperCamelCase)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , _UpperCamelCase : str , **_UpperCamelCase : int) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(_UpperCamelCase , subfolder="""qformer_tokenizer""")
_lowerCamelCase : str = cls._get_arguments_from_pretrained(_UpperCamelCase , **_UpperCamelCase)
args.append(_UpperCamelCase)
return cls(*_UpperCamelCase)
| 701 |
def naive_cut_rod_recursive( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revue = float("""-inf""" )
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revue
def top_down_cut_rod( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive( n , prices , max_rev ):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("""-inf""" )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args( n , prices ):
    '''simple docstring'''
    if n < 0:
        msg = F"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            F"""Got n = {n} but length of prices = {len(prices )}"""
        )
        raise ValueError(msg )
def main():
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    main()
| 15 | 0 |
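# Complexity note for the row above: the naive recursion is O(2^n), while both the
# memoized top-down and the bottom-up variants run in O(n^2) time with O(n) extra space.
# e.g. bottom_up_cut_rod(4, [1, 5, 8, 9]) -> 10 (two cuts of length 2).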
'''simple docstring'''
from __future__ import annotations
def two_pointer( nums , target ):
    '''simple docstring'''
    # assumes nums is sorted in ascending order; returns the indices of a pair
    # summing to target, or [] if no such pair exists
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 702 |
from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Any = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 0 |
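# Hedged round-trip sketch: the inline tests above name the class XORCipher; a plain
# function is enough to show why encrypt and decrypt are the same operation for XOR.
def xor_string(content: str, key: int) -> str:
    key %= 255  # mirror the key-size clamp used by the class above
    return "".join(chr(ord(ch) ^ key) for ch in content)

assert xor_string(xor_string("hallo welt", 67), 67) == "hallo welt"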
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowerCAmelCase : Optional[int] =TypeVar("T")
class __snake_case ( Generic[T] ):
'''simple docstring'''
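    # Iterative (bottom-up) segment tree: the leaves occupy indices N..2N-1 of the
    # flat array self.st, and internal node p combines its children 2p and 2p + 1
    # with the associative function self.fn.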
def __init__( self : Tuple , _UpperCamelCase : list[T] , _UpperCamelCase : Callable[[T, T], T]) ->None:
"""simple docstring"""
_lowerCamelCase : Any | T = None
_lowerCamelCase : int = len(_UpperCamelCase)
_lowerCamelCase : list[T] = [any_type for _ in range(self.N)] + arr
_lowerCamelCase : List[Any] = fnc
self.build()
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->None:
"""simple docstring"""
for p in range(self.N - 1 , 0 , -1):
_lowerCamelCase : Tuple = self.fn(self.st[p * 2] , self.st[p * 2 + 1])
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : T) ->None:
"""simple docstring"""
p += self.N
_lowerCamelCase : List[Any] = v
while p > 1:
_lowerCamelCase : Union[str, Any] = p // 2
_lowerCamelCase : List[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1])
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : int) ->T | None: # noqa: E741
"""simple docstring"""
_lowerCamelCase : List[str] = l + self.N, r + self.N
_lowerCamelCase : T | None = None
while l <= r:
if l % 2 == 1:
_lowerCamelCase : int = self.st[l] if res is None else self.fn(_UpperCamelCase , self.st[l])
if r % 2 == 0:
_lowerCamelCase : List[Any] = self.st[r] if res is None else self.fn(_UpperCamelCase , self.st[r])
_lowerCamelCase : Tuple = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowerCAmelCase : Optional[int] =[1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
lowerCAmelCase : str ={
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
lowerCAmelCase : Union[str, Any] =SegmentTree(test_array, min)
lowerCAmelCase : Any =SegmentTree(test_array, max)
lowerCAmelCase : Any =SegmentTree(test_array, lambda a, b: a + b)
def A__ ( ):
'''simple docstring'''
for i in range(len(__A ) ):
for j in range(__A , len(__A ) ):
_lowerCamelCase : Optional[int] = reduce(__A , test_array[i : j + 1] )
_lowerCamelCase : Union[str, Any] = reduce(__A , test_array[i : j + 1] )
_lowerCamelCase : Union[str, Any] = reduce(lambda __A , __A : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__A , __A )
assert max_range == max_segment_tree.query(__A , __A )
assert sum_range == sum_segment_tree.query(__A , __A )
test_all_segments()
for index, value in test_updates.items():
lowerCAmelCase : Union[str, Any] =value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 703 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
_UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
_lowerCamelCase : Any = Text(
cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
if self.streaming:
_lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
self.builder.download_and_prepare(
download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
_lowerCamelCase : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
return dataset
| 15 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Optional[int] = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowerCamelCase : List[Any] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : List[str] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowerCamelCase : Optional[Any] = {"""unk_token""": """<unk>"""}
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(_UpperCamelCase))
_lowerCamelCase : List[str] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : int) ->Dict:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Union[str, Any]) ->Any:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **_UpperCamelCase : List[str]) ->List[Any]:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
_lowerCamelCase : List[Any] = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_rust_tokenizer()
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : str = CLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCamelCase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = CLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCamelCase : str = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , _UpperCamelCase)
self.assertIsInstance(processor_fast.tokenizer , _UpperCamelCase)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , _UpperCamelCase)
self.assertIsInstance(processor_fast.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Tuple = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : str = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[Any] = CLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : List[Any] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : Any = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->str:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = CLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Tuple = """lower newer"""
_lowerCamelCase : Optional[int] = processor(text=_UpperCamelCase)
_lowerCamelCase : int = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = CLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = """lower newer"""
_lowerCamelCase : Optional[int] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""input_ids""", """attention_mask""", """pixel_values"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[str] = CLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.batch_decode(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(_UpperCamelCase)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
"""simple docstring"""
_lowerCamelCase : str = self.get_image_processor()
_lowerCamelCase : Any = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = CLIPProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """lower newer"""
_lowerCamelCase : Any = self.prepare_image_inputs()
_lowerCamelCase : List[str] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 704 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence ):
    '''simple docstring'''
    # single-pass, in-place three-way partition (Dijkstra's Dutch national flag)
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 0 |
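# Hedged usage sketch: dutch_national_flag_sort([2, 0, 1, 2, 0]) -> [0, 0, 1, 2, 2]
# (sorted in a single pass, in place).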