code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
from __future__ import annotations
import math
def A__ ( __A , __A , __A , __A , __A ):
'''simple docstring'''
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__A ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
return min(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = [90, 23, 6, 33, 21, 65, 123, 34_423]
_lowerCamelCase : List[Any] = math.log(len(__A ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __A , __A , __A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 15 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
_lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""]
_lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments)
lowerCAmelCase : int =parser.parse_args()
if args.num_workers is None:
lowerCAmelCase : Any =multiprocessing.cpu_count()
lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowerCAmelCase : str =time.time()
lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
lowerCAmelCase : Dict =time.time()
lowerCAmelCase : Dict =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
lowerCAmelCase : Tuple =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCAmelCase : List[str] =[
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : bool , _UpperCamelCase : str = None , _UpperCamelCase : list = None) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : str = os.path.abspath(os.path.join("""examples""" , """by_feature"""))
_lowerCamelCase : Any = os.path.abspath("""examples""")
for item in os.listdir(_UpperCamelCase):
if item not in EXCLUDE_EXAMPLES:
_lowerCamelCase : int = os.path.join(_UpperCamelCase , _UpperCamelCase)
if os.path.isfile(_UpperCamelCase) and ".py" in item_path:
with self.subTest(
tested_script=_UpperCamelCase , feature_script=_UpperCamelCase , tested_section="""main()""" if parser_only else """training_function()""" , ):
_lowerCamelCase : Dict = compare_against_test(
os.path.join(_UpperCamelCase , _UpperCamelCase) , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[Any] = """\n""".join(_UpperCamelCase)
if special_strings is not None:
for string in special_strings:
_lowerCamelCase : Union[str, Any] = diff.replace(_UpperCamelCase , """""")
self.assertEqual(_UpperCamelCase , """""")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , _UpperCamelCase)
self.one_complete_example("""complete_nlp_example.py""" , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = os.path.abspath(os.path.join("""examples""" , """cv_example.py"""))
_lowerCamelCase : Dict = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.one_complete_example("""complete_cv_example.py""" , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = False
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any) ->List[Any]:
"""simple docstring"""
super().setUpClass()
_lowerCamelCase : str = tempfile.mkdtemp()
_lowerCamelCase : Optional[Any] = os.path.join(cls._tmpdir , """default_config.yml""")
write_basic_config(save_location=cls.configPath)
_lowerCamelCase : List[str] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict) ->Optional[Any]:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
"""simple docstring"""
_lowerCamelCase : int = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""")))
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
_lowerCamelCase : Any = run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""")))
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0')}
""".split()
_lowerCamelCase : Any = run_command(self._launch_args + testargs , return_stdout=_UpperCamelCase)
self.assertNotIn("""epoch 0:""" , _UpperCamelCase)
self.assertIn("""epoch 1:""" , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
"""simple docstring"""
_lowerCamelCase : Any = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2')}
""".split()
_lowerCamelCase : Dict = run_command(self._launch_args + testargs , return_stdout=_UpperCamelCase)
if torch.cuda.is_available():
_lowerCamelCase : List[str] = torch.cuda.device_count()
else:
_lowerCamelCase : Dict = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , _UpperCamelCase)
self.assertIn("""epoch 1:""" , _UpperCamelCase)
else:
self.assertIn("""epoch 0:""" , _UpperCamelCase)
self.assertIn("""epoch 1:""" , _UpperCamelCase)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Any = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""}):
_lowerCamelCase : Any = run_command(self._launch_args + testargs , return_stdout=_UpperCamelCase)
_lowerCamelCase : List[Any] = re.findall("""({.+})""" , _UpperCamelCase)
_lowerCamelCase : List[Any] = [r for r in results if """accuracy""" in r][-1]
_lowerCamelCase : Optional[Any] = ast.literal_eval(_UpperCamelCase)
self.assertGreaterEqual(results["""accuracy"""] , 0.7_5)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""})
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
_lowerCamelCase : int = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , """tracking""")))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
_lowerCamelCase : Tuple = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs)
| 15 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = IFPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
return self._get_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
_lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def A__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 15 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : int ={
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'data2vec-audio'
def __init__( self : Optional[int] , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : Optional[Any]=768 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : Optional[int]=12 , _UpperCamelCase : List[Any]=3072 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : str=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : int=1E-5 , _UpperCamelCase : str="gelu" , _UpperCamelCase : List[str]=(512, 512, 512, 512, 512, 512, 512) , _UpperCamelCase : Any=(5, 2, 2, 2, 2, 2, 2) , _UpperCamelCase : Dict=(10, 3, 3, 3, 3, 2, 2) , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Optional[int]=16 , _UpperCamelCase : List[str]=19 , _UpperCamelCase : List[Any]=5 , _UpperCamelCase : str=0.0_5 , _UpperCamelCase : Union[str, Any]=10 , _UpperCamelCase : Dict=2 , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : str=10 , _UpperCamelCase : Any=0 , _UpperCamelCase : Any="sum" , _UpperCamelCase : str=False , _UpperCamelCase : List[Any]=False , _UpperCamelCase : List[Any]=256 , _UpperCamelCase : List[str]=(512, 512, 512, 512, 1500) , _UpperCamelCase : Optional[Any]=(5, 3, 3, 1, 1) , _UpperCamelCase : Optional[int]=(1, 2, 3, 1, 1) , _UpperCamelCase : List[str]=512 , _UpperCamelCase : Tuple=0 , _UpperCamelCase : int=1 , _UpperCamelCase : Optional[int]=2 , _UpperCamelCase : str=False , _UpperCamelCase : Optional[Any]=3 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : str=None , **_UpperCamelCase : List[Any] , ) ->str:
"""simple docstring"""
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : List[str] = feat_extract_activation
_lowerCamelCase : Union[str, Any] = list(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = list(_UpperCamelCase)
_lowerCamelCase : Dict = list(_UpperCamelCase)
_lowerCamelCase : int = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[str] = num_conv_pos_embedding_groups
_lowerCamelCase : int = conv_pos_kernel_size
_lowerCamelCase : Dict = len(self.conv_dim)
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Optional[int] = hidden_dropout
_lowerCamelCase : List[str] = attention_dropout
_lowerCamelCase : Dict = activation_dropout
_lowerCamelCase : Tuple = feat_proj_dropout
_lowerCamelCase : Optional[Any] = final_dropout
_lowerCamelCase : str = layerdrop
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Union[str, Any] = vocab_size
_lowerCamelCase : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : Any = mask_time_length
_lowerCamelCase : str = mask_time_min_masks
_lowerCamelCase : Optional[int] = mask_feature_prob
_lowerCamelCase : str = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : int = ctc_loss_reduction
_lowerCamelCase : Union[str, Any] = ctc_zero_infinity
# adapter
_lowerCamelCase : List[str] = add_adapter
_lowerCamelCase : Dict = adapter_kernel_size
_lowerCamelCase : str = adapter_stride
_lowerCamelCase : Union[str, Any] = num_adapter_layers
_lowerCamelCase : Optional[int] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(_UpperCamelCase)
_lowerCamelCase : List[Any] = list(_UpperCamelCase)
_lowerCamelCase : Optional[int] = list(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
"""simple docstring"""
return math.prod(self.conv_stride)
| 15 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
_lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
| 15 | 1 |
from math import ceil
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : List[Any] = list(range(0 , __A ) )
_lowerCamelCase : List[str] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_lowerCamelCase : Any = []
for i in device_map_blocks:
if device_map_blocks.count(__A ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__A )
# Missing blocks
_lowerCamelCase : Any = [i for i in blocks if i not in device_map_blocks]
_lowerCamelCase : str = [i for i in device_map_blocks if i not in blocks]
if len(__A ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(__A ) )
if len(__A ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(__A ) )
if len(__A ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(__A ) )
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = list(range(__A ) )
_lowerCamelCase : Optional[int] = int(ceil(n_layers / len(__A ) ) )
_lowerCamelCase : Optional[int] = [layers[i : i + n_blocks] for i in range(0 , __A , __A )]
return dict(zip(__A , __A ) )
| 15 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
    '''Unit tests for EulerDiscreteScheduler: config sweeps plus full
    fixed-seed denoising loops checked against hard-coded tensor statistics.

    NOTE(review): identifiers in this block are machine-mangled. Both class
    attributes are named ``_snake_case`` (the second assignment clobbers the
    first) and every method is named ``_SCREAMING_SNAKE_CASE`` (only the last
    ``def`` survives on the class). Bodies assign to a throwaway
    ``_lowerCamelCase`` and then read the intended names (``config``,
    ``scheduler``, ``model``, ``sample``, ``output``, ``result_sum``,
    ``result_mean``), and pass ``_UpperCamelCase`` where the enclosing loop
    variable was clearly intended — restore real names before running.
    '''
    _snake_case = (EulerDiscreteScheduler,)  # presumably ``scheduler_classes`` — TODO confirm
    _snake_case = 10  # presumably ``num_inference_steps``; clobbers the line above
    def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
        """Return the default scheduler config dict, updated with overrides."""
        _lowerCamelCase : Optional[int] = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0_0_0_1,
            """beta_end""": 0.0_2,
            """beta_schedule""": """linear""",
        }
        config.update(**_UpperCamelCase)  # NOTE(review): ``config`` is undefined — the dict above was presumably bound to it pre-mangling
        return config
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        """Construction smoke test across several num_train_timesteps values."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
        """Construction smoke test across several (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
            self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
        """Construction smoke test across supported beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
        """Construction smoke test across supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
        """Full denoising loop (default epsilon prediction) with a fixed seed;
        checks sum/mean of |final sample| against hard-coded values."""
        _lowerCamelCase : List[Any] = self.scheduler_classes[0]
        _lowerCamelCase : str = self.get_scheduler_config()
        _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        _lowerCamelCase : str = torch.manual_seed(0)
        _lowerCamelCase : str = self.dummy_model()
        _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : int = sample.to(_UpperCamelCase)
        for i, t in enumerate(scheduler.timesteps):
            _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : Dict = output.prev_sample
        _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
        """Same full loop with prediction_type='v_prediction'."""
        _lowerCamelCase : int = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
        _lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        _lowerCamelCase : Any = torch.manual_seed(0)
        _lowerCamelCase : int = self.dummy_model()
        _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : Dict = sample.to(_UpperCamelCase)
        for i, t in enumerate(scheduler.timesteps):
            _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : Tuple = output.prev_sample
        _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
        assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
        """Full loop with timesteps placed on a target device (torch_device)."""
        _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
        _lowerCamelCase : int = self.get_scheduler_config()
        _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = torch.manual_seed(0)
        _lowerCamelCase : Tuple = self.dummy_model()
        _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        _lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
        for t in scheduler.timesteps:
            _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : List[Any] = output.prev_sample
        _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
        """Full loop with use_karras_sigmas enabled."""
        _lowerCamelCase : List[str] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[int] = self.get_scheduler_config()
        _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
        _lowerCamelCase : int = torch.manual_seed(0)
        _lowerCamelCase : Tuple = self.dummy_model()
        _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
        for t in scheduler.timesteps:
            _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : int = output.prev_sample
        _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
        assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 15 | 1 |
# Doc-notebook build settings (Italian locale): the pip-install cell injected
# at the top of generated notebooks, the default notebook cells, and the
# placeholder -> dummy-class substitutions used when rendering code samples.
# NOTE(review): names are mangled — the second constant references
# INSTALL_CONTENT, which is presumably what the first constant was originally
# called; confirm against the doc-builder conventions.
lowerCAmelCase : Optional[Any] ="\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase : Union[str, Any] =[{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase : Optional[Any] ={
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 15 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger plus the MGP-STR tokenizer resource tables: vocab file name,
# the hub URL for the pretrained vocab, and the max model input size (27).
# NOTE(review): names are mangled — the tokenizer class below reads
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, presumably the original names of
# these constants; confirm.
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ={
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( __lowerCAmelCase ):
    '''Character-level tokenizer (MGP-STR style): each character of the input
    text is one token, mapped through a JSON vocab file.

    NOTE(review): identifiers are machine-mangled. The three class attributes
    all share the name ``_snake_case`` (later assignments clobber earlier
    ones), every method shares ``_SCREAMING_SNAKE_CASE`` (only the last
    survives), and ``__init__``/``save_vocabulary`` declare several
    parameters all named ``_UpperCamelCase`` — a duplicate-argument
    SyntaxError. Bodies also read names the mangler erased
    (``char_tokens``, ``vocab_file``, ``filename_prefix``).
    '''
    _snake_case = VOCAB_FILES_NAMES
    _snake_case = PRETRAINED_VOCAB_FILES_MAP
    _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]:
        """Load the JSON vocab file and build the id->token decoder map."""
        super().__init__(
            unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , )
        with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
            _lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
    @property
    def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
        """Vocabulary size."""
        return len(self.vocab)
    def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
        """Return the vocab merged with any added tokens."""
        return dict(self.vocab , **self.added_tokens_encoder)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any:
        """Tokenize: one token per character of the input string."""
        _lowerCamelCase : Tuple = []
        for s in text:
            char_tokens.extend(_UpperCamelCase)
        return char_tokens
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]:
        """Token -> id, falling back to the unk token's id."""
        return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token))
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict:
        """Id -> token via the decoder map."""
        return self.decoder.get(_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
        """Write the vocab as JSON into the given directory; returns the path.
        Logs an error and returns None if the directory does not exist."""
        if not os.path.isdir(_UpperCamelCase):
            logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase))
            return
        _lowerCamelCase : Tuple = os.path.join(
            _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
        return (vocab_file,)
| 15 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
# Emit INFO-level progress messages during the conversion below.
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers.
lowerCAmelCase : int =logging.get_logger(__name__)
def A__ ( __A ):
    """Build the HF ``DetrConfig`` for model name ``__A``.

    Returns:
        (config, is_panoptic): the populated config and whether the name
        denotes a panoptic-segmentation variant.

    Raises:
        ValueError: if the model name contains neither resnet-50 nor resnet-101.

    Fixes vs. the mangled original: ``use_timm_backbone`` and
    ``backbone_config`` were both set to the model-name string, the id2label
    cast was ``int(model_name)``, and the ``config.*`` attribute writes were
    collapsed into throwaway locals — restored to match the upstream DETR
    conversion script.
    """
    # initialize config: pick the HF-native ResNet backbone matching the name
    if "resnet-50" in __A:
        backbone_config = ResNetConfig.from_pretrained("""microsoft/resnet-50""")
    elif "resnet-101" in __A:
        backbone_config = ResNetConfig.from_pretrained("""microsoft/resnet-101""")
    else:
        raise ValueError("""Model name should include either resnet50 or resnet101""")
    # use_timm_backbone must be False when supplying an HF backbone_config
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = """panoptic""" in __A
    config.num_labels = 250 if is_panoptic else 91
    repo_id = """huggingface/label-files"""
    filename = """coco-detection-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    # JSON keys arrive as strings; the config expects integer class ids
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config, is_panoptic
def A__ ( __A ):
'''simple docstring'''
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase : List[str] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def A__ ( state_dict , old , new ):
    """Rename a checkpoint entry in place: pop ``old`` and store its value
    under ``new``.

    Fixes vs. the mangled original: the three parameters were all named
    ``__A`` (a duplicate-argument SyntaxError), and the write-back
    ``state_dict[new] = val`` had been collapsed into an assignment to a
    throwaway local, so the popped tensor was silently discarded.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def A__ ( state_dict , is_panoptic=False ):
    """Split DETR's fused attention projections into separate q/k/v tensors.

    The torch-hub checkpoint stores each attention block's query/key/value
    projection as one fused ``in_proj_weight``/``in_proj_bias`` (3 * 256
    rows); the HF model expects separate ``q_proj``/``k_proj``/``v_proj``
    entries. Mutates ``state_dict`` in place. For panoptic checkpoints the
    source keys carry a ``detr.`` prefix.

    Fixes vs. the mangled original: the duplicated parameter names were a
    SyntaxError, and every ``state_dict[...] = ...`` write had been collapsed
    into assignments to a throwaway local, so the split tensors were dropped.
    Target key names restored from the upstream conversion script.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""")
        in_proj_bias_cross_attn = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.weight"""] = in_proj_weight_cross_attn[:256, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.bias"""] = in_proj_bias_cross_attn[:256]
        state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.weight"""] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.bias"""] = in_proj_bias_cross_attn[256:512]
        state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.weight"""] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.bias"""] = in_proj_bias_cross_attn[-256:]
def A__ ( ):
    """Download and return the standard COCO val2017 'two cats' image used to
    verify the conversion.

    Fix: the original referenced the undefined name ``__A`` for both the URL
    and the ``stream`` flag (this function takes no arguments); the URL local
    and ``stream=True`` are restored.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A__ ( __A , __A=None , __A=False ):
    '''Convert a torch-hub DETR checkpoint to the HF format, verify its
    outputs against the original model on a test image, then optionally save
    to disk and/or push to the hub.

    NOTE(review): this body is machine-mangled and not runnable as-is:
    the three parameters share the name ``__A`` (a duplicate-argument
    SyntaxError — presumably model_name / pytorch_dump_folder_path /
    push_to_hub, per the argparse block below); tuple unpacking and many
    ``state_dict[...] = val`` writes were collapsed into assignments to the
    throwaway ``_lowerCamelCase``; and bodies read names the mangler erased
    (``model_name``, ``is_panoptic``, ``state_dict``, ``key``, ``val``,
    ``encoding``, ``original_outputs``, ``outputs``). Restore real names
    before use.
    '''
    _lowerCamelCase , _lowerCamelCase : List[str] = get_detr_config(__A )
    # load original model from torch hub
    _lowerCamelCase : Any = {
        """detr-resnet-50""": """detr_resnet50""",
        """detr-resnet-101""": """detr_resnet101""",
    }
    logger.info(F"""Converting model {model_name}...""" )
    _lowerCamelCase : Optional[int] = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=__A ).eval()
    _lowerCamelCase : Optional[Any] = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(__A ):
        if is_panoptic:
            _lowerCamelCase : Optional[int] = """detr.""" + src
        rename_key(__A , __A , __A )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__A , is_panoptic=__A )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    _lowerCamelCase : str = """detr.model.""" if is_panoptic else """model."""
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("""detr""" )
                and not key.startswith("""class_labels_classifier""" )
                and not key.startswith("""bbox_predictor""" )
            ):
                _lowerCamelCase : Tuple = state_dict.pop(__A )
                _lowerCamelCase : List[Any] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                _lowerCamelCase : int = state_dict.pop(__A )
                _lowerCamelCase : Union[str, Any] = val
            elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
                continue
            else:
                _lowerCamelCase : List[str] = state_dict.pop(__A )
                _lowerCamelCase : Optional[Any] = val
        else:
            if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
                _lowerCamelCase : Any = state_dict.pop(__A )
                _lowerCamelCase : Any = val
    # finally, create HuggingFace model and load state dict
    _lowerCamelCase : List[str] = DetrForSegmentation(__A ) if is_panoptic else DetrForObjectDetection(__A )
    model.load_state_dict(__A )
    model.eval()
    # verify our conversion on an image
    _lowerCamelCase : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
    _lowerCamelCase : List[Any] = DetrImageProcessor(format=__A )
    _lowerCamelCase : Optional[int] = processor(images=prepare_img() , return_tensors="""pt""" )
    _lowerCamelCase : Optional[int] = encoding["""pixel_values"""]
    _lowerCamelCase : List[Any] = detr(__A )
    _lowerCamelCase : Tuple = model(__A )
    assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(__A ).mkdir(exist_ok=__A )
        model.save_pretrained(__A )
        processor.save_pretrained(__A )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("""Uploading PyTorch model and image processor to the hub...""" )
        model.push_to_hub(F"""nielsr/{model_name}""" )
        processor.push_to_hub(F"""nielsr/{model_name}""" )
# CLI entry point for the DETR conversion script.
# NOTE(review): mangled — the parser/args variables are bound to the
# throwaway ``lowerCAmelCase`` yet the code reads ``parser`` / ``args``, and
# ``convert_detr_checkpoint`` does not exist under that name here (the
# conversion function above was renamed). Restore real names before running.
if __name__ == "__main__":
    lowerCAmelCase : List[Any] =argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    lowerCAmelCase : int =parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 15 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
    '''Unit tests for the backbone utilities: feature/index alignment,
    validation of (out_features, out_indices) pairs, and BackboneMixin
    property behavior.

    NOTE(review): identifiers are machine-mangled: all three methods share the
    name ``_SCREAMING_SNAKE_CASE`` (only the last survives on the class),
    results are bound to the throwaway ``_lowerCamelCase`` and then asserted
    through ``_UpperCamelCase`` (undefined here — presumably the unpacked
    out_features/out_indices and stage-name lists pre-mangling), and the
    BackboneMixin test assigns attributes to ``_lowerCamelCase`` instead of
    ``backbone.stage_names`` / ``backbone._out_features`` /
    ``backbone._out_indices``. Restore real names before running.
    '''
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
        """get_aligned_output_features_output_indices: defaulting and
        alignment between features and (possibly negative) indices."""
        _lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
        # Defaults to last layer if both are None
        _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""c"""])
        self.assertEqual(_UpperCamelCase , [2])
        # Out indices set to match out features
        _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [0, 2])
        # Out features set to match out indices
        _lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [0, 2])
        # Out features selected from negative indices
        _lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [-3, -1])
    def _SCREAMING_SNAKE_CASE ( self : int) ->int:
        """verify_out_features_out_indices: each invalid combination raises,
        and a valid combination passes."""
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
        # Out features must be a list
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
        # Out features must be a subset of stage names
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
        # Out indices must be a list or tuple
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
        # Out indices must be a subset of stage names
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
        # Out features and out indices must be the same length
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
        # Out features should match out indices
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
        # Out features and out indices should be in order
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
    def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
        """BackboneMixin: out_features/out_indices properties reflect updates
        to the underlying attributes."""
        _lowerCamelCase : int = BackboneMixin()
        _lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
        _lowerCamelCase : Tuple = ["""a""", """c"""]
        _lowerCamelCase : List[Any] = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [0, 2])
        # Check out features and indices are updated correctly
        _lowerCamelCase : str = ["""a""", """b"""]
        self.assertEqual(backbone.out_features , ["""a""", """b"""])
        self.assertEqual(backbone.out_indices , [0, 1])
        _lowerCamelCase : Optional[int] = [-3, -1]
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
# Module-level logger shared by the pipeline below.
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
    '''Image-classification pipeline: load image -> image processor -> model
    forward -> softmax -> top-k {score, label} dicts, for both PyTorch and
    TensorFlow backends.

    NOTE(review): identifiers are machine-mangled. The decorator argument and
    base-class alias ``__lowerCAmelCase`` are presumably PIPELINE_INIT_ARGS
    and Pipeline (imported above); the preprocess/forward/postprocess methods
    all share the name ``_SCREAMING_SNAKE_CASE`` (only the last survives);
    ``postprocess`` declares two parameters both named ``_UpperCamelCase``
    (a duplicate-argument SyntaxError); and several bodies assign to the
    throwaway ``_lowerCamelCase`` and then read the intended names
    (``top_k``, ``postprocess_params``, ``model_inputs``, ``model_outputs``,
    ``probs``, ``topk``, ``scores``, ``ids``). ``idalabel`` looks like the
    mangled ``id2label``. Restore real names before use.
    '''
    def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple:
        """Require the vision backend and restrict to image-classification
        model architectures for the active framework."""
        super().__init__(*_UpperCamelCase , **_UpperCamelCase)
        requires_backends(self , """vision""")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]:
        """Split call kwargs into preprocess/forward/postprocess params
        (only ``top_k`` is forwarded, to postprocess)."""
        _lowerCamelCase : Optional[int] = {}
        if top_k is not None:
            _lowerCamelCase : str = top_k
        return {}, {}, postprocess_params
    def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict:
        """Classify one image or a batch (paths, URLs or PIL images)."""
        return super().__call__(_UpperCamelCase , **_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str:
        """Preprocess: load the image and run the image processor."""
        _lowerCamelCase : Tuple = load_image(_UpperCamelCase)
        _lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
        return model_inputs
    def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]:
        """Forward pass through the model."""
        _lowerCamelCase : Any = self.model(**_UpperCamelCase)
        return model_outputs
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str:
        """Postprocess: softmax the logits and return the top-k labels,
        capping k at the model's number of labels."""
        if top_k > self.model.config.num_labels:
            _lowerCamelCase : Union[str, Any] = self.model.config.num_labels
        if self.framework == "pt":
            _lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0]
            _lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase)
        elif self.framework == "tf":
            _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0]
            _lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase)
            _lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        _lowerCamelCase : str = scores.tolist()
        _lowerCamelCase : str = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
| 15 | import math
def A__ ( __A ):
    """Primality test by trial division.

    Args:
        __A: a non-negative integer.

    Returns:
        True if ``__A`` is prime, False otherwise.

    Raises:
        AssertionError: if ``__A`` is not a non-negative int.
    """
    # Fix: the original asserted ``isinstance(__A, __A)``, which raises
    # TypeError at runtime (arg 2 must be a type) — the intent was ``int``.
    assert isinstance(__A, int) and (
        __A >= 0
    ), "'number' must been an int and positive"
    if 1 < __A < 4:
        # 2 and 3 are primes
        return True
    elif __A < 2 or not __A % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    # Trial division by odd candidates up to sqrt(number).
    odd_numbers = range(3, int(math.sqrt(__A) + 1), 2)
    return not any(not __A % i for i in odd_numbers)
def A__ ( value , factor=1 , **kwargs ):
    """Return the prime nearest to (but different from) ``factor * value``.

    Searches upward by default; pass ``desc=True`` to search downward. If
    ``factor * value`` is itself prime, recurses from the next integer so a
    *different* prime is returned.

    Fixes vs. the mangled original: the duplicated ``__A`` parameters were a
    SyntaxError, the locals ``factor``/``value`` were unbound, and the helper
    ``is_prime`` does not exist under that name in this module (the primality
    test above was renamed), so an equivalent private helper is inlined here.
    Searching downward past 2 is unsupported (no smaller prime exists).
    """

    def _is_prime(n):
        # Same trial-division test as the module-level primality check.
        if 1 < n < 4:
            return True
        if n < 2 or not n % 2:
            return False
        return not any(not n % i for i in range(3, int(math.sqrt(n) + 1), 2))

    value = factor * value
    first_value_val = value
    while not _is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return A__(value + 1, **kwargs)
    return value
| 15 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger plus the RoCBert pretrained-config archive map: model id ->
# hosted config.json URL.
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : int ={
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __snake_case ( __lowerCAmelCase ):
    """Configuration class for RoCBert models.

    Stores the hyper-parameters of a RoCBert encoder: the standard BERT-style
    sizes plus the pronunciation/shape embedding options specific to RoCBert.
    Defaults mirror the ``weiweishi/roc-bert-base-zh`` checkpoint.

    Fixes vs. the mangled original: every ``__init__`` parameter was named
    ``_UpperCamelCase`` (a duplicate-argument SyntaxError) and
    ``classifier_dropout`` was read in the body but never declared — names
    and order restored from the upstream RoCBertConfig signature; positional
    call sites are unaffected.
    """

    # Was mangled to ``_snake_case``; the base config class keys on this.
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-1_2,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=2_4858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: optional pronunciation/shape embeddings that can be
        # concatenated with the word embeddings (concat_input).
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 15 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __snake_case ( Pipeline ):
    """Image-classification pipeline: assigns a label (with score) to an image.

    Restored from a machine-mangled original in which all four pipeline hook
    methods shared one name (each shadowing the previous) and
    ``_sanitize_parameters`` returned an undefined name.  Method names follow
    the ``Pipeline`` contract (_sanitize_parameters / preprocess / _forward /
    postprocess) so the base class can dispatch to them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self , """vision""")
        # Restrict to image-classification heads for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        """Route the only user-facing knob (top_k) to postprocess."""
        # Bug fix: the original built nothing into the dict and returned an
        # undefined name; `top_k` now lands in postprocess_params.
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Classify the image(s) passed as inputs."""
        return super().__call__(images , **kwargs)

    def preprocess(self, image):
        """Load the image (path/URL/PIL) and run the image processor."""
        image = load_image(image)
        model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """Single forward pass through the model."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Turn logits into the top-k list of {"score", "label"} dicts."""
        # Never ask for more classes than the model has.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1)[0]
            topk = tf.math.top_k(probs , k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        # `idalabel` in the mangled source is the garbled `id2label` mapping.
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
| 15 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A__ ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Tokenize a single text line, padded/truncated to ``max_length``.

    Args:
        tokenizer: a HF tokenizer; BART tokenizers get ``add_prefix_space``.
        line: the raw text line.
        max_length: truncation/padding length.
        padding_side: "left" or "right"; set on the tokenizer before encoding.
        pad_to_max_length: pad to ``max_length`` when True, no padding otherwise.
        return_tensors: tensor format forwarded to the tokenizer (default "pt").

    NOTE: the original signature reused one parameter name six times (a
    SyntaxError) and dropped the ``tokenizer.padding_side`` assignment target;
    both are restored here.
    """
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def A__ ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop trailing all-padding columns from a batch of token ids.

    Args:
        input_ids: 2-D tensor (batch, seq_len).
        pad_token_id: id that marks padding.
        attention_mask: optional mask trimmed with the same column selection.

    Returns:
        The trimmed ids, or a (ids, mask) tuple when a mask is given.

    NOTE: restored from a mangled signature that reused one parameter name and
    read undefined locals.
    """
    # Keep any column where at least one row holds a non-pad token.
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( __lowerCAmelCase ):
    """Line-by-line seq2seq dataset reading ``<type_path>.source`` / ``.target``.

    NOTE(review): this block is machine-mangled — ``__init__`` reuses the same
    parameter name for several arguments (a SyntaxError) and every assignment
    targets ``_lowerCamelCase`` while later code reads the original names
    (``self.src_file``, ``self.max_source_length``, ...).  Restore the
    upstream (tokenizer, data_dir, max_source_length, max_target_length,
    type_path, n_obs, src_lang, tgt_lang, prefix) naming before use.
    """
    def __init__( self : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any="train" , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : int=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Any="" , ) ->str:
        """Index the source file and remember tokenization settings."""
        super().__init__()
        _lowerCamelCase : str = Path(_UpperCamelCase).joinpath(type_path + """.source""")
        _lowerCamelCase : Any = Path(_UpperCamelCase).joinpath(type_path + """.target""")
        # Per-line character lengths double as the dataset index.
        _lowerCamelCase : str = self.get_char_lens(self.src_file)
        _lowerCamelCase : str = max_source_length
        _lowerCamelCase : Optional[int] = max_target_length
        assert min(self.src_lens) > 0, F"""found empty line in {self.src_file}"""
        _lowerCamelCase : str = tokenizer
        _lowerCamelCase : List[str] = prefix
        if n_obs is not None:
            # Optionally cap the number of examples.
            _lowerCamelCase : Tuple = self.src_lens[:n_obs]
        _lowerCamelCase : Dict = src_lang
        _lowerCamelCase : str = tgt_lang

    def __len__( self : List[str]) ->Union[str, Any]:
        """Number of (source, target) pairs."""
        return len(self.src_lens)

    def __getitem__( self : Optional[int] , _UpperCamelCase : str) ->Dict[str, torch.Tensor]:
        """Read one source/target line pair and tokenize both sides."""
        _lowerCamelCase : Tuple = index + 1 # linecache starts at 1
        _lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) , _UpperCamelCase).rstrip("""\n""")
        _lowerCamelCase : int = linecache.getline(str(self.tgt_file) , _UpperCamelCase).rstrip("""\n""")
        assert source_line, F"""empty source line for index {index}"""
        assert tgt_line, F"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , _UpperCamelCase):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        _lowerCamelCase : Any = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCamelCase) else self.tokenizer
        )
        _lowerCamelCase : Tuple = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCamelCase) else self.tokenizer
        _lowerCamelCase : Union[str, Any] = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_source_length , """right""")
        _lowerCamelCase : Optional[int] = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_target_length , """right""")
        _lowerCamelCase : Any = source_inputs["""input_ids"""].squeeze()
        _lowerCamelCase : Tuple = target_inputs["""input_ids"""].squeeze()
        _lowerCamelCase : Dict = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def _SCREAMING_SNAKE_CASE ( _UpperCamelCase : int) ->Optional[Any]:
        """Character length of every line in the file at the given path."""
        return [len(_UpperCamelCase) for x in Path(_UpperCamelCase).open().readlines()]

    def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : int) ->Dict[str, torch.Tensor]:
        """Collate a list of examples into padded, padding-trimmed batch tensors."""
        _lowerCamelCase : Dict = torch.stack([x["""input_ids"""] for x in batch])
        _lowerCamelCase : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch])
        _lowerCamelCase : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch])
        # RAG wraps separate generator / question-encoder tokenizers.
        _lowerCamelCase : Tuple = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , _UpperCamelCase)
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Dict = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , _UpperCamelCase)
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Optional[Any] = trim_batch(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase , _lowerCamelCase : List[Any] = trim_batch(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase)
        _lowerCamelCase : List[str] = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
lowerCAmelCase : Dict =getLogger(__name__)
def A__ ( __A ):
    """Flatten one level of nesting: a list of iterables into a single list."""
    flattened = []
    for chunk in __A:
        flattened.extend(chunk)
    return flattened
def A__ ( __A ):
    """Dump the current git repository info to ``<folder>/git_log.json``.

    NOTE(review): ``get_git_info`` and ``save_json`` are not defined under
    these names in this file (every helper was renamed to ``A__``), and the
    mangling collapsed the content/path arguments into one parameter —
    upstream passes (repo_infos, folder_path).  Verify before running.
    """
    _lowerCamelCase : str = get_git_info()
    save_json(__A , os.path.join(__A , """git_log.json""" ) )
def A__ ( content , path , indent=4 , **json_dump_kwargs ):
    """Serialize ``content`` as JSON to the file at ``path``.

    Args:
        content: any JSON-serializable object.
        path: destination file path (overwritten).
        indent: pretty-print indent forwarded to ``json.dump`` (default 4).
        **json_dump_kwargs: extra keyword args for ``json.dump``.

    NOTE: restored from a mangled signature that reused one parameter name.
    """
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def A__ ( __A ):
    """Read and parse the JSON file at path ``__A``.

    Bug fix: the original passed the *path string* to ``json.load`` instead of
    the open file handle, which raises ``AttributeError`` at runtime.
    """
    with open(__A ) as f:
        return json.load(f )
def A__ ( ):
    """Collect metadata about the enclosing git repository.

    Returns:
        dict with repo id, HEAD sha, active branch, and the local hostname.

    NOTE: restored from a mangled body that read an undefined name ``__A`` in a
    zero-argument function; upstream searches parent directories for the repo
    and stringifies the ``Repo`` object itself.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def A__ ( f , x ):
    """Eager map: apply ``f`` to every element of ``x`` and return a list.

    NOTE: restored from a mangled signature that named both parameters ``__A``.
    """
    return list(map(f , x ) )
def A__ ( obj , path ):
    """Pickle ``obj`` to the file at ``path``.

    Returns ``pickle.dump``'s result (``None``), matching the original.

    NOTE: restored from a mangled signature that named both parameters ``__A``.
    """
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def A__ ( __A ):
    """SQuAD-style answer normalization.

    Lower-cases, strips punctuation, removes the articles a/an/the, and
    squeezes whitespace — in that order (the order matters: punctuation is
    removed *before* article matching, so e.g. "a.b" -> "ab", not "b").

    Bug fix: the original ``remove_punc`` helper read an undefined name
    ``exclude`` instead of its local punctuation set.
    """
    def remove_articles(text ):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def A__ ( prediction , ground_truth ):
    """Token-level F1 between two answer strings (SQuAD-style).

    Both strings are normalized (lower-case, no punctuation, no articles)
    before token overlap is computed.  Returns ``0`` (int) when there is no
    overlap, otherwise the float F1 score.

    NOTE: restored from a mangled version with duplicate parameter names that
    called undefined ``normalize_answer`` and returned undefined ``fa``; the
    normalization is inlined as a helper so the function is self-contained.
    """
    def _normalize(text ):
        # lower-case -> strip punctuation -> drop articles -> squeeze spaces
        text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation ) )
        text = re.sub(r"""\b(a|an|the)\b""" , """ """ , text )
        return " ".join(text.split() )

    prediction_tokens = _normalize(prediction ).split()
    ground_truth_tokens = _normalize(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    return (2 * precision * recall) / (precision + recall)
def A__ ( prediction , ground_truth ):
    """True iff the two answer strings match after SQuAD-style normalization.

    NOTE: restored from a mangled version with duplicate parameter names that
    called undefined ``normalize_answer``; the normalization is inlined.
    """
    def _normalize(text ):
        # lower-case -> strip punctuation -> drop articles -> squeeze spaces
        text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation ) )
        text = re.sub(r"""\b(a|an|the)\b""" , """ """ , text )
        return " ".join(text.split() )

    return _normalize(prediction ) == _normalize(ground_truth )
def A__ ( output_lns , reference_lns ):
    """Mean exact-match over paired prediction/reference lines.

    Returns:
        ``{"em": float}`` — fraction of pairs that match after SQuAD-style
        normalization (``{"em": 0}`` for empty input).

    NOTE: restored from a mangled version with duplicate parameter names that
    called undefined ``exact_match_score`` and read undefined ``em``; the
    comparison is inlined so the function is self-contained.
    """
    def _normalize(text ):
        # lower-case -> strip punctuation -> drop articles -> squeeze spaces
        text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation ) )
        text = re.sub(r"""\b(a|an|the)\b""" , """ """ , text )
        return " ".join(text.split() )

    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += _normalize(hypo ) == _normalize(pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def A__ ( model_prefix ):
    """True iff the model identifier names a RAG model.

    Bug fix: the original read an undefined name ``model_prefix`` while its
    parameter was called ``__A``; the parameter now carries the real name.
    """
    return model_prefix.startswith("""rag""" )
def A__ ( extra_params , hparams , config ):
    """Move hyper-parameters named in ``extra_params`` from ``hparams`` onto ``config``.

    For each name ``p`` with a truthy value on ``hparams``: if neither ``p``
    nor its equivalent exists on ``config``, the value is dropped (with a log
    message); otherwise it is copied to whichever attribute ``config`` has.
    In every case the attribute is removed from ``hparams``.

    Returns:
        The (mutated) ``(hparams, config)`` pair.

    NOTE: restored from a mangled signature with three identical parameter
    names and bodies that read undefined locals.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 15 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    """Unit tests for MgpstrProcessor (char tokenizer + ViT image processor).

    NOTE(review): this block is machine-mangled — every method is named
    ``_SCREAMING_SNAKE_CASE`` so each definition shadows the previous one, and
    assignments target ``_lowerCamelCase`` while later code reads the original
    names.  Upstream names (setUp, tearDown, test_save_load_pretrained_*, ...)
    must be restored for unittest discovery to see these tests.
    """
    # Image processor class under test (None when vision deps are missing).
    _snake_case = ViTImageProcessor if is_vision_available() else None

    @property
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        """Image-processor kwargs from the tester fixture.

        NOTE(review): ``self.image_processor_tester`` is never assigned in this
        block — confirm where it is created before relying on this property.
        """
        return self.image_processor_tester.prepare_image_processor_dict()

    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
        """setUp: write a tiny char vocab and an image-processor config to a tmp dir."""
        _lowerCamelCase : Union[str, Any] = (3, 32, 128)
        _lowerCamelCase : str = tempfile.mkdtemp()
        # fmt: off
        _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
        _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(_UpperCamelCase) + """\n""")
        # Minimal ViT image-processor config for a 32x128 input.
        _lowerCamelCase : Any = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
        with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
            json.dump(_UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
        """Load the fixture tokenizer from the tmp dir."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
        """Load the fixture image processor from the tmp dir."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """tearDown: remove the tmp dir."""
        shutil.rmtree(self.tmpdirname)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
        """Random uint8 HWC PIL image used as processor input."""
        _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
        _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
        return image_input

    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """save_pretrained/from_pretrained round-trips both sub-components."""
        _lowerCamelCase : List[str] = self.get_tokenizer()
        _lowerCamelCase : Tuple = self.get_image_processor()
        _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        processor.save_pretrained(self.tmpdirname)
        _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        """from_pretrained honors extra kwargs (special tokens, normalization)."""
        _lowerCamelCase : Dict = self.get_tokenizer()
        _lowerCamelCase : Optional[Any] = self.get_image_processor()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        processor.save_pretrained(self.tmpdirname)
        _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
        _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
        _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
        """Processor(images=...) matches the raw image processor output."""
        _lowerCamelCase : int = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : List[str] = self.prepare_image_inputs()
        _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
        _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
        """Processor(text=...) matches the raw tokenizer output."""
        _lowerCamelCase : List[Any] = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Optional[int] = """test"""
        _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
        _lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """Text + image call yields pixel_values and labels; no input raises."""
        _lowerCamelCase : Union[str, Any] = self.get_image_processor()
        _lowerCamelCase : List[Any] = self.get_tokenizer()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Any = """test"""
        _lowerCamelCase : List[str] = self.prepare_image_inputs()
        _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
        self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
        # test if it raises when no input is passed
        with pytest.raises(_UpperCamelCase):
            processor()

    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """char_decode matches tokenizer.batch_decode with spaces stripped."""
        _lowerCamelCase : Union[str, Any] = self.get_image_processor()
        _lowerCamelCase : List[str] = self.get_tokenizer()
        _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
        _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
        _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
        self.assertListEqual(_UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
        """Image-only call exposes exactly the processor's model_input_names."""
        _lowerCamelCase : Dict = self.get_image_processor()
        _lowerCamelCase : str = self.get_tokenizer()
        _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : int = None
        _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
        _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        """batch_decode on (char, bpe, wp) logits returns the expected keys."""
        _lowerCamelCase : List[str] = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Any = torch.randn(1 , 27 , 38)
        _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
        _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
        _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
import warnings

# Back-compat shim: re-export StableDiffusionInpaintPipeline under its old
# script location and warn users to import it from `diffusers` directly.
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401

# Emitted at import time so downstream users see the deprecation immediately.
warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
| 15 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
    """Parse command-line arguments for the TPU multiprocess launcher.

    Returns:
        argparse.Namespace with ``num_cores`` (int, default 1),
        ``training_script`` (path to the script to spawn), and
        ``training_script_args`` (everything after the script path).

    Bug fix: the original passed an undefined name ``__A`` as ``type=`` and
    ``nargs=``; restored to ``int``/``str``/``REMAINDER``.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def A__ ( ):
    """Entry point: import the target training script and spawn it on TPU cores.

    NOTE(review): this block is machine-mangled — it calls ``parse_args()`` and
    (in the guard below) ``main()``, neither of which exists under those names
    in this file, and the assignments target ``_lowerCamelCase`` while later
    lines read ``args``/``script_fpath``/``mod``.  Restore the original names
    before running.
    """
    _lowerCamelCase : List[str] = parse_args()
    # Import training_script as a module.
    _lowerCamelCase : List[Any] = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    _lowerCamelCase : Optional[Any] = script_fpath.stem
    _lowerCamelCase : Dict = importlib.import_module(__A )
    # Patch sys.argv so the spawned script sees its own args plus --tpu_num_cores.
    _lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )

if __name__ == "__main__":
    main()
| 15 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Any =get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = XLMRobertaTokenizer
_snake_case = XLMRobertaTokenizerFast
_snake_case = True
_snake_case = True
def _SCREAMING_SNAKE_CASE ( self : int) ->str:
    """setUp: build a SentencePiece-fixture tokenizer and save it to tmpdirname.

    NOTE(review): the constructor arguments were mangled — upstream passes
    (SAMPLE_VOCAB, keep_accents=True); confirm before running.
    """
    super().setUp()
    # We have a SentencePiece fixture for testing
    _lowerCamelCase : Union[str, Any] = XLMRobertaTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase)
    tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
    """`<pad>` must convert to id 1 and back again."""
    _lowerCamelCase : Optional[Any] = """<pad>"""
    _lowerCamelCase : Optional[int] = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase) , _UpperCamelCase)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase) , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
    """First/last vocab entries and total vocab length are as expected."""
    _lowerCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys())
    self.assertEqual(vocab_keys[0] , """<s>""")
    self.assertEqual(vocab_keys[1] , """<pad>""")
    self.assertEqual(vocab_keys[-1] , """<mask>""")
    self.assertEqual(len(_UpperCamelCase) , 1002)
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
    """vocab_size of the fixture tokenizer is 1002 (1000 pieces + offsets)."""
    self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
    """End-to-end tokenize / ids / back-to-tokens round trip on the fixture,
    including unknown-character ("é") handling."""
    _lowerCamelCase : Tuple = XLMRobertaTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase)  # NOTE(review): args mangled, see setUp
    _lowerCamelCase : Any = tokenizer.tokenize("""This is a test""")
    self.assertListEqual(_UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(_UpperCamelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
    _lowerCamelCase : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
    # Expected piece sequence for the accented sentence.
    self.assertListEqual(
        _UpperCamelCase , [
            SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""",
            SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""",
            SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""",
            SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""",
        ] , )
    _lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCamelCase)
    self.assertListEqual(
        _UpperCamelCase , [
            value + tokenizer.fairseq_offset
            for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
        ] , )
    _lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(_UpperCamelCase)
    # "9" and "é" are out-of-vocab and come back as <unk>.
    self.assertListEqual(
        _UpperCamelCase , [
            SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""",
            SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""",
            SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""",
            SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""",
        ] , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
    """save_pretrained round trips between slow and fast tokenizers, in both
    legacy and tokenizer.json formats, preserving special tokens."""
    if not self.test_slow_tokenizer:
        # as we don't have a slow version, we can't compare the outputs between slow and fast versions
        return
    _lowerCamelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
            _lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase)
            _lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase)
            _lowerCamelCase : str = tempfile.mkdtemp()
            _lowerCamelCase : Optional[int] = tokenizer_r.save_pretrained(_UpperCamelCase)
            _lowerCamelCase : Optional[int] = tokenizer_p.save_pretrained(_UpperCamelCase)
            # Checks it save with the same files + the tokenizer.json file for the fast one
            self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
            _lowerCamelCase : Optional[int] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
            self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase)
            # Checks everything loads correctly in the same way
            _lowerCamelCase : str = tokenizer_r.from_pretrained(_UpperCamelCase)
            _lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(_UpperCamelCase)
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
            shutil.rmtree(_UpperCamelCase)
            # Save tokenizer rust, legacy_format=True
            _lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
            _lowerCamelCase : Dict = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase)
            _lowerCamelCase : Union[str, Any] = tokenizer_p.save_pretrained(_UpperCamelCase)
            # Checks it save with the same files
            self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase)
            # Checks everything loads correctly in the same way
            _lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(_UpperCamelCase)
            _lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(_UpperCamelCase)
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase))
            shutil.rmtree(_UpperCamelCase)
            # Save tokenizer rust, legacy_format=False
            _lowerCamelCase : Dict = tempfile.mkdtemp()
            _lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase)
            _lowerCamelCase : Any = tokenizer_p.save_pretrained(_UpperCamelCase)
            # Checks it saved the tokenizer.json file
            self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
            # Checks everything loads correctly in the same way
            _lowerCamelCase : List[str] = tokenizer_r.from_pretrained(_UpperCamelCase)
            _lowerCamelCase : Dict = tokenizer_p.from_pretrained(_UpperCamelCase)
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase))
            shutil.rmtree(_UpperCamelCase)
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
    """Full pretrained xlm-roberta-base tokenizer (network fixture, cached)."""
    return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
    """Tokenizer built from a copied model file survives a pickle round trip."""
    with tempfile.NamedTemporaryFile() as f:
        shutil.copyfile(_UpperCamelCase , f.name)
        _lowerCamelCase : Tuple = XLMRobertaTokenizer(f.name , keep_accents=_UpperCamelCase)
        _lowerCamelCase : List[Any] = pickle.dumps(_UpperCamelCase)
    pickle.loads(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
    """Slow and fast (Rust) tokenizers agree on tokenize and encode output."""
    if not self.test_rust_tokenizer:
        return
    _lowerCamelCase : Tuple = self.get_tokenizer()
    _lowerCamelCase : int = self.get_rust_tokenizer()
    _lowerCamelCase : List[Any] = """I was born in 92000, and this is falsé."""
    _lowerCamelCase : List[Any] = tokenizer.tokenize(_UpperCamelCase)
    _lowerCamelCase : Tuple = rust_tokenizer.tokenize(_UpperCamelCase)
    self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
    _lowerCamelCase : Optional[int] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase)
    _lowerCamelCase : List[Any] = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase)
    self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
    _lowerCamelCase : str = self.get_rust_tokenizer()
    _lowerCamelCase : Any = tokenizer.encode(_UpperCamelCase)
    _lowerCamelCase : Any = rust_tokenizer.encode(_UpperCamelCase)
    self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
    """Pretrained tokenizer reproduces fairseq's ids for "Hello World!"."""
    _lowerCamelCase : Optional[int] = """Hello World!"""
    _lowerCamelCase : Tuple = [0, 3_5378, 6661, 38, 2]
    # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
    # xlmr.eval()
    # xlmr.encode(symbols)
    self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase))
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]:
    """Pretrained tokenizer reproduces fairseq's ids on a long, weird-character
    sentence, with <unk> handling where the vocabularies diverge."""
    _lowerCamelCase : Any = (
        """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
        """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
    )
    # Reference ids captured from fairseq's xlmr.base encoder (see comments below).
    _lowerCamelCase : Optional[int] = [
        0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 17_9459, 12_4850, 4, 6044, 237, 12, 6, 5, 6, 4,
        6780, 705, 15, 1388, 44, 378, 1_0114, 711, 152, 20, 6, 5, 2_2376, 642, 1221, 1_5190, 3_4153, 450, 5608,
        959, 1119, 5_7702, 136, 186, 47, 1098, 2_9367, 47,
        # 4426, # What fairseq tokenizes from "<unk>": "_<"
        # 3678, # What fairseq tokenizes from "<unk>": "unk"
        # 2740, # What fairseq tokenizes from "<unk>": ">"
        3, # What we tokenize from "<unk>": "<unk>"
        6, # Residue from the tokenization: an extra sentencepiece underline
        4, 6044, 237, 6284, 5_0901, 528, 31, 90, 34, 927, 2,
    ]
    # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
    # xlmr.eval()
    # xlmr.encode(symbols)
    self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase))
    @slow
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
        """Full integration check against a pinned model revision: expected input_ids and attention_mask for three padded sequences."""
        _lowerCamelCase : Tuple = {"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
def A__(n, prices):
    """Naive exponential-time recursion for the rod-cutting problem.

    Args:
        n: remaining rod length (non-negative int).
        prices: ``prices[i - 1]`` is the price of a piece of length ``i``.

    Returns:
        The maximum revenue obtainable for a rod of length ``n``.

    Fixes: the original declared two parameters both named ``__A`` (a
    SyntaxError) and returned ``max_revue`` without ever binding it.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        # Cut off a first piece of length i and recurse on the remainder.
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue
def A__(n, prices):
    """Top-down (memoized) rod cutting: validate input, then recurse with a memo table.

    Args:
        n: rod length.
        prices: ``prices[i - 1]`` is the price of a piece of length ``i``.

    Returns:
        Maximum obtainable revenue.

    Fixes: duplicate ``__A`` parameter names and the lost binding of the
    memo table that was passed to the recursive helper.
    """
    _enforce_args(n, prices)
    # -inf marks "not computed yet" in the memo table.
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def A__(n, prices, max_rev):
    """Recursive core of top-down rod cutting with memoization.

    Args:
        n: remaining rod length.
        prices: piece prices, 1-indexed via ``prices[i - 1]``.
        max_rev: memo table; ``max_rev[k] >= 0`` means length ``k`` is solved.

    Fixes: duplicate ``__A`` parameters, the unbound ``max_revenue``
    accumulator, and the lost memoization write ``max_rev[n] = ...``.
    """
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        # Memoize before returning so later calls short-circuit.
        max_rev[n] = max_revenue
        return max_rev[n]
def A__(n, prices):
    """Bottom-up dynamic-programming rod cutting.

    Args:
        n: rod length.
        prices: ``prices[i - 1]`` is the price of a piece of length ``i``.

    Returns:
        Maximum obtainable revenue for a rod of length ``n``.

    Fixes: duplicate ``__A`` parameters and all lost bindings
    (``max_rev[0] = 0``, the inner accumulator, and ``max_rev[i] = ...``).
    """
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def A__(n, prices):
    """Validate rod-cutting arguments.

    Args:
        n: requested rod length; must be >= 0 and <= len(prices).
        prices: one price per integral piece length.

    Raises:
        ValueError: if ``n`` is negative or exceeds the number of prices.

    Fixes: the original declared both parameters as ``__A`` (SyntaxError).
    """
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def A__():
    """Smoke test: the three rod-cutting implementations must agree on a known case.

    Fixes: the asserts below read ``expected_max_revenue`` / ``max_rev_*``,
    but the obfuscated original never bound those names. NOTE(review): the
    called helpers exist in this module under obfuscated names; confirm the
    restored call names against the unobfuscated module.
    """
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    main()
| 15 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer file.
lowerCAmelCase : Dict =logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory (VOCAB_FILES_NAMES).
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
# Hub URLs for the pretrained vocab files (PRETRAINED_VOCAB_FILES_MAP).
lowerCAmelCase : List[str] ={
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}
# Maximum model input sizes per checkpoint (PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( PreTrainedTokenizer ):
    """Character-level tokenizer for MGP-STR (scene-text recognition).

    Splits text into single characters and maps each to an id via a JSON
    vocabulary file.

    Fixes: the original subclassed the undefined name ``__lowerCAmelCase``
    (the file imports ``PreTrainedTokenizer``), collapsed every method onto
    one name, declared duplicate ``_UpperCamelCase`` parameters (SyntaxError),
    and lost the ``self.vocab`` / ``self.decoder`` / local bindings. Method
    and attribute names are restored to the ``PreTrainedTokenizer`` contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping for id -> character decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize by splitting the string into individual characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a character to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its character (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary JSON into *save_directory*; returns the file path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 15 | from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Any = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    # NOTE(review): obfuscation lost the target — originally this bound
    # RemBertTokenizer = None when sentencepiece is unavailable.
    lowerCAmelCase : Dict =None
# Module-level logger.
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory (VOCAB_FILES_NAMES).
lowerCAmelCase : Optional[int] ={"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
# Hub URLs for pretrained vocab/tokenizer files (PRETRAINED_VOCAB_FILES_MAP).
lowerCAmelCase : Optional[int] ={
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}
# Maximum model input sizes (PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
lowerCAmelCase : List[str] ={
    "google/rembert": 256,
}
# SentencePiece word-start marker.
lowerCAmelCase : Dict ="▁"
class __snake_case ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) RemBERT tokenizer.

    Fixes: the original subclassed the undefined ``__lowerCAmelCase`` (the
    file imports ``PreTrainedTokenizerFast``), collapsed all class attributes
    onto ``_snake_case`` and all methods onto one name, declared duplicate
    ``_UpperCamelCase`` parameters (SyntaxError), and lost the ``self.*`` and
    ``mask_token`` bindings in ``__init__``. Names restored to the
    ``PreTrainedTokenizerFast`` contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # The mask token behaves like a normal word: it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-saved when the sentencepiece model is present.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_pair=None, already_has_special_tokens=False):
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            if token_ids_a_pair is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a_pair is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_pair)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into *save_directory*; returns the path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 15 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( AbstractDatasetReader ):
    """Dataset reader that builds a ``datasets`` Dataset (or streaming variant) from text files.

    Fixes: the original subclassed the undefined ``__lowerCAmelCase``
    (``AbstractDatasetReader`` is imported above), collapsed the read method's
    name, and lost the local ``None`` bindings passed as keyword arguments in
    ``read()`` (they were read back through the undefined ``_UpperCamelCase``).
    """

    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        # Normalize to {split_name: paths} so the builder always gets a mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style) for the configured split."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory,
            )
        return dataset
| 15 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __snake_case :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : List[str]=2 , _UpperCamelCase : Optional[int]=3 , _UpperCamelCase : int=64 , _UpperCamelCase : Any=None) ->str:
"""simple docstring"""
_lowerCamelCase : List[Any] = np.random.default_rng(_UpperCamelCase)
_lowerCamelCase : Dict = length
_lowerCamelCase : Union[str, Any] = rng.normal(size=(length,)).astype(np.floataa)
_lowerCamelCase : Tuple = a * self.x + b + rng.normal(scale=0.1 , size=(length,)).astype(np.floataa)
def __len__( self : Dict) ->Any:
"""simple docstring"""
return self.length
def __getitem__( self : List[Any] , _UpperCamelCase : str) ->Optional[int]:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class __snake_case ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Tuple=0 , _UpperCamelCase : str=0 , _UpperCamelCase : Optional[Any]=False) ->Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor([2, 3]).float())
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor([2, 3]).float())
_lowerCamelCase : List[Any] = True
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]=None) ->Optional[int]:
"""simple docstring"""
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
_lowerCamelCase : Union[str, Any] = False
return x * self.a[0] + self.b[0]
class __snake_case ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : Any=0 , _UpperCamelCase : Optional[Any]=False) ->Optional[int]:
"""simple docstring"""
super().__init__()
_lowerCamelCase : Optional[Any] = torch.nn.Parameter(torch.tensor(_UpperCamelCase).float())
_lowerCamelCase : int = torch.nn.Parameter(torch.tensor(_UpperCamelCase).float())
_lowerCamelCase : List[Any] = True
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : int=None) ->List[str]:
"""simple docstring"""
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
_lowerCamelCase : str = False
return x * self.a + self.b
def A__(accelerator, batch_size=16):
    """Build (train, eval) dataloaders over the bundled MRPC CSV samples.

    Args:
        accelerator: Accelerate accelerator; its distributed_type decides padding.
        batch_size: accepted for signature compatibility; the mocked loaders
            use fixed batch sizes 2 (train) and 1 (eval) as in the original.

    Fixes: duplicate ``__A`` parameters (SyntaxError) and the lost
    ``outputs`` / ``outputs["labels"]`` bindings in ``tokenize_function``.
    NOTE(review): train shuffle=True / eval shuffle=False restored from the
    upstream accelerate helper — confirm against the unobfuscated source.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
# NOTE(review): obfuscation lost the names — originally red=0, white=1, blue=2
# and colors=(red, white, blue); as written, red/white/blue are undefined here.
lowerCAmelCase : List[str] =(red, white, blue)
def A__(sequence):
    """Sort a sequence of flag values (0/1/2) in place — Dutch national flag algorithm, O(n) time, O(1) extra space.

    Args:
        sequence: mutable sequence containing only the values in the
            module-level ``colors`` tuple.

    Returns:
        The same sequence, sorted ([] for empty input).

    Raises:
        ValueError: if an element is not one of the three flag colors.

    Fixes: both swaps had lost their assignment targets (the tuples were
    bound to ``_lowerCamelCase``), and the low/high/mid pointer bindings
    were lost.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # First color: swap down to the 'low' region.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # Third color: swap up to the 'high' region; do not advance mid,
            # the swapped-in element is unexamined.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): obfuscation lost the bindings — the names user_input,
    # unsorted and dutch_national_flag_sort read below are never defined
    # under those names in this file.
    lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
    lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
    print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 1 |
import math
import random
def A__(value, deriv=False):
    """Logistic sigmoid, or its derivative.

    Args:
        value: input. When ``deriv`` is True, *value* is interpreted as a
            sigmoid OUTPUT and the derivative s*(1-s) is returned.
        deriv: select derivative mode.

    Fixes: the original declared both parameters as ``__A`` (SyntaxError).
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
# Fixed input value fed through the single-weight network on every iteration.
lowerCAmelCase : Optional[int] =0.02
def A__(expected, number_propagations):
    """Train a single-weight perceptron toward expected/100 and return its final output * 100.

    Args:
        expected: target value in [0, 100].
        number_propagations: number of forward/backward iterations.

    Fixes: duplicate ``__A`` parameters and the lost layer bindings; the
    derivative step originally called ``sigmoid_function(__A, __A)`` —
    restored to ``sigmoid_function(layer_1, deriv=True)``, matching the
    sigmoid helper's contract.
    """
    # Random starting weight in [-99, 199] (kept from the original).
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): forward_propagation is not defined under that name in
    # this file (obfuscation renamed it); confirm against the original module.
    lowerCAmelCase : Optional[int] =int(input("Expected value: "))
    lowerCAmelCase : Any =int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 15 | from __future__ import annotations
# Collected N-queens solutions; each entry is one completed board. NOTE(review):
# the solver reads this as `solution`, a name lost in obfuscation.
lowerCAmelCase : int =[]
def A__(board, row, column):
    """Return True if a queen can be placed at (row, column) without attack.

    Only the row, the column, and the two upward diagonals are checked,
    because the solver fills the board top-down.

    Fixes: the original declared all three parameters as ``__A`` (SyntaxError).
    """
    # Same row.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # Same column.
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def A__(board, row):
    """Backtracking N-queens solver: place a queen in *row*, recurse, undo.

    Appends each completed board to the module-level ``solution`` list and
    prints it.

    Fixes: duplicate ``__A`` parameters and the lost board writes — the
    place/remove assignments ``board[row][i] = 1`` / ``= 0`` were bound to
    ``_lowerCamelCase`` instead of the board cell.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1   # place a queen
            solve(board, row + 1)
            board[row][i] = 0   # backtrack
    return False
def A__(board):
    """Pretty-print a board: 'Q' marks a queen cell (1), '.' an empty cell."""
    for row in board:
        for cell in row:
            symbol = "Q" if cell == 1 else "."
            print(symbol, end=" ")
        print()
# n=int(input("The no. of queens"))
# Board size (8x8 by default). NOTE(review): obfuscation lost the bindings —
# the names n, board, solve and solution read below are never defined under
# those names in this file.
lowerCAmelCase : int =8
lowerCAmelCase : Union[str, Any] =[[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | 1 |
def A__(i, wt, val, j):
    """Memory-function (memoized recursive) 0/1 knapsack using the module-global table ``f``.

    Args:
        i: number of items considered.
        wt: item weights (1-indexed via ``wt[i - 1]``).
        val: item values.
        j: remaining capacity.

    Fixes: the original declared all four parameters as ``__A``
    (SyntaxError) and lost the memoization write ``f[i][j] = ...``.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:  # < 0 marks "not yet computed"
        if j < wt[i - 1]:
            # Item i does not fit: skip it.
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def A__(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Args:
        w: knapsack capacity (must be >= 1).
        wt: item weights.
        val: item values.
        n: number of items.

    Returns:
        (optimal value, full dp table) — the table is used downstream to
        reconstruct the optimal item subset.

    Fixes: duplicate ``__A`` parameters and the lost dp-cell writes
    (``dp[i][w_] = ...`` was bound to ``_lowerCamelCase``).
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    # w_ == w after the loop (requires w >= 1), preserving the original contract.
    return dp[n][w_], dp
def A__(w, wt, val):
    """Validated knapsack that also reconstructs one optimal item subset.

    Args:
        w: capacity.
        wt: item weights (list/tuple of ints).
        val: item values (list/tuple, same length as wt).

    Returns:
        (optimal value, set of 1-indexed item numbers forming an optimal subset).

    Raises:
        ValueError: non-sequence inputs or mismatched lengths.
        TypeError: non-integer weights.

    Fixes: duplicate ``__A`` parameters and the lost ``num_items`` /
    dp-table / result bindings.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def A__(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, collecting the 1-indexed items of one optimal subset into *optimal_set*.

    Fixes: the original declared all five parameters as ``__A`` (SyntaxError).
    """
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # Item i is not needed for the optimum: drop it.
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            # Item i is part of the optimum: take it and reduce capacity.
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    # Example: values [3,2,4,4], weights [4,3,2,3], 4 items, capacity 6.
    # NOTE(review): obfuscation lost the names — knapsack, mf_knapsack,
    # knapsack_with_example_solution, n, w, wt, val read below are never
    # defined under those names in this file.
    lowerCAmelCase : Any =[3, 2, 4, 4]
    lowerCAmelCase : str =[4, 3, 2, 3]
    lowerCAmelCase : Dict =4
    lowerCAmelCase : str =6
    # Global memo table for mf_knapsack: row 0 is all zeros, the rest -1 (unset).
    lowerCAmelCase : List[str] =[[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    lowerCAmelCase , lowerCAmelCase : Union[str, Any] =knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w)) # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    lowerCAmelCase , lowerCAmelCase : Optional[Any] =knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 15 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels per GLUE fine-tuning task (sts-b is a regression task).
lowerCAmelCase : int ={
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def A__(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model + config on disk.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        bert_config_file: JSON config describing the XLNet architecture.
        pytorch_dump_folder_path: output directory for weights + config.
        finetuning_task: optional GLUE/SQuAD task name selecting the head.

    Fixes: the original declared three parameters all named ``__A``
    (SyntaxError) and lost the ``config`` / ``model`` / dump-path bindings
    (they were assigned to ``_lowerCamelCase`` and never read back).
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse paths/task and run the conversion.
    lowerCAmelCase : Dict =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    # NOTE(review): obfuscation lost the bindings — `parser`, `args` and
    # `convert_xlnet_checkpoint_to_pytorch` are read below but never defined
    # under those names in this file.
    lowerCAmelCase : Union[str, Any] =parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the megatron_bert subpackage: maps submodule name
# to the public names it provides. NOTE(review): obfuscation lost the name —
# _LazyModule below reads `_import_structure`, which is never bound here.
lowerCAmelCase : Optional[int] ={
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): originally this extended _import_structure with the
    # "modeling_megatron_bert" entry; the assignment target was lost.
    lowerCAmelCase : List[str] =[
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def A__ ( __A ):
    '''Return True iff every character of the string occurs at most once.

    Uses an arbitrary-precision int as a bitmap indexed by Unicode code
    point: bit `ord(ch)` is set the first time `ch` is seen, and a second
    occurrence is detected by testing that bit before setting it.
    '''
    bitmap = 0
    for ch in __A:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 15 | 1 |
from __future__ import annotations
# Accumulates every complete n-queens placement found by `solve`.
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at (row, column) without being
    attacked along the row, column, or the two upward diagonals.

    Only rows above `row` can hold queens (placement proceeds top-down),
    so the downward diagonals need not be checked.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking search: place one queen per row, recording and printing
    every complete placement found.
    """
    if row >= len(board):
        # Append a deep copy: `board` keeps mutating during backtracking,
        # so appending the reference itself would leave only zeroed boards.
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Pretty-print a board: 'Q' marks a queen, '.' an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
    """Slow integration tests comparing XLM-RoBERTa hidden states against
    reference values extracted from the original fairseq checkpoints."""

    @slow
    def test_xlm_roberta_base(self) -> None:
        """Check the last hidden state of xlm-roberta-base on a fixed sentence."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # Reference values were produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self) -> None:
        """Check the last hidden state of xlm-roberta-large on a fixed sentence."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # Reference values were produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 15 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's CLI: an optional --num_cores, the training script
    path, and everything after it verbatim (forwarded to the script)."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    """Import the training script as a module and spawn its `_mp_fn` on
    `--num_cores` TPU processes via torch_xla."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees its own arguments
    # plus the injected --tpu_num_cores flag.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 15 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class __snake_case ( __lowerCAmelCase ):
    """Image processor that (optionally) resizes, center-crops, rescales and
    normalizes a batch of images, returning them as a `BatchFeature` under
    the `pixel_values` key.

    Defaults: resize to 256x256 (bicubic), center-crop to 224x224, rescale
    by 1/255, normalize with the ImageNet standard mean/std.
    """

    _snake_case = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        # `resize` here is the module-level transform, not this method.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured pipeline to one image or a list of images.

        Per-call arguments override the instance defaults; `return_tensors`
        selects the tensor framework of the returned `BatchFeature`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 15 | 1 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of `array`.

    Divide and conquer: either the first element (the pivot) belongs to the
    answer — then recurse on the later elements >= pivot — or it does not,
    in which case the answer starts at some smaller later element.
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            # A smaller element exists: best subsequence may skip the pivot.
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    # Candidate that keeps the pivot and everything >= it afterwards.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 15 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( Protocol ):
    """Structural type for an audio filter: anything with a `process` method
    mapping one input sample x[n] to one output sample y[n]."""

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]; the default emits silence (0.0)."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return (lowest, highest) plot bounds over the positive-frequency bins,
    clamped to at most -20 dB below and at least +20 dB above.

    Bin 0 (DC) and the bins at/after Nyquist are excluded from the scan.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type, samplerate: int) -> None:
    """Plot the gain (dB) of `filter_type` by feeding it a unit impulse and
    taking the FFT of its zero-padded impulse response.

    `filter_type` is any object with a `process(sample) -> float` method.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type, samplerate: int) -> None:
    """Plot the phase response (radians) of `filter_type` via the FFT of its
    zero-padded impulse response.

    `filter_type` is any object with a `process(sample) -> float` method.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    # Unwrap so the phase curve is continuous instead of jumping at +/- pi.
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
| 15 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
    '''Test helper that builds small YOLOS configs, random pixel inputs and
    optional detection labels for the model tests below.

    NOTE(review): this block appears machine-obfuscated — every parameter was
    collapsed to `_UpperCamelCase` (duplicate parameter names are a
    SyntaxError) and every assignment target to `_lowerCamelCase`, while the
    bodies still reference the original names (`parent`, `batch_size`,
    `labels`, `config_and_inputs`, ...). Restore the original identifiers
    before running.
    '''

    def __init__( self : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Any=13 , _UpperCamelCase : Union[str, Any]=[30, 30] , _UpperCamelCase : str=2 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : int=True , _UpperCamelCase : Tuple=True , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : str=5 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Optional[int]=37 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : List[Any]=0.0_2 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[Any]=8 , _UpperCamelCase : List[str]=10 , ) ->str:
        """Store the test hyper-parameters (presumably: parent test case,
        batch_size, image_size, patch_size, num_channels, ... — TODO confirm
        against the unobfuscated original)."""
        _lowerCamelCase : Any = parent
        _lowerCamelCase : int = batch_size
        _lowerCamelCase : Union[str, Any] = image_size
        _lowerCamelCase : Tuple = patch_size
        _lowerCamelCase : Any = num_channels
        _lowerCamelCase : List[str] = is_training
        _lowerCamelCase : List[Any] = use_labels
        _lowerCamelCase : str = hidden_size
        _lowerCamelCase : Dict = num_hidden_layers
        _lowerCamelCase : int = num_attention_heads
        _lowerCamelCase : Union[str, Any] = intermediate_size
        _lowerCamelCase : Union[str, Any] = hidden_act
        _lowerCamelCase : Union[str, Any] = hidden_dropout_prob
        _lowerCamelCase : int = attention_probs_dropout_prob
        _lowerCamelCase : int = type_sequence_label_size
        _lowerCamelCase : str = initializer_range
        _lowerCamelCase : Optional[Any] = num_labels
        _lowerCamelCase : Optional[int] = scope
        _lowerCamelCase : Dict = n_targets
        _lowerCamelCase : Union[str, Any] = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        _lowerCamelCase : Optional[int] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        _lowerCamelCase : List[str] = num_patches + 1 + self.num_detection_tokens

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
        """Build (config, pixel_values, labels): random pixel tensor plus, when
        use_labels is set, one dict of random class_labels/boxes per example."""
        _lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        _lowerCamelCase : List[Any] = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            _lowerCamelCase : Union[str, Any] = []
            for i in range(self.batch_size):
                _lowerCamelCase : Union[str, Any] = {}
                _lowerCamelCase : Dict = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=_UpperCamelCase)
                _lowerCamelCase : Any = torch.rand(self.n_targets , 4 , device=_UpperCamelCase)
                labels.append(_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """Return a YolosConfig assembled from the stored hyper-parameters."""
        return YolosConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )

    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[Any]) ->List[str]:
        """Run YolosModel and check the last_hidden_state shape
        (batch, expected_seq_len, hidden_size)."""
        _lowerCamelCase : Tuple = YolosModel(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Tuple = model(_UpperCamelCase)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))

    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]) ->Optional[Any]:
        """Run YolosForObjectDetection with and without labels and check the
        logits/pred_boxes shapes and the scalar loss."""
        _lowerCamelCase : Optional[int] = YolosForObjectDetection(_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Dict = model(pixel_values=_UpperCamelCase)
        _lowerCamelCase : Tuple = model(_UpperCamelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
        _lowerCamelCase : Dict = model(pixel_values=_UpperCamelCase , labels=_UpperCamelCase)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))

    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
        """Return (config, inputs_dict) for the common-test machinery."""
        _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = config_and_inputs
        _lowerCamelCase : int = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    '''Model test suite for YOLOS (mixes the common model tests and the
    pipeline tests with unittest.TestCase).

    NOTE(review): obfuscation damage — the two identical `__lowerCAmelCase`
    bases would raise a duplicate-base TypeError, the distinct class
    attributes were all collapsed to `_snake_case` (each assignment clobbers
    the previous), and all method names were collapsed to
    `_SCREAMING_SNAKE_CASE` (only the last definition survives). Restore the
    original names before running.
    '''

    _snake_case = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    _snake_case = (
        {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict=False) ->Optional[int]:
        """Extend the common `_prepare_for_class`: when labels are requested for
        YolosForObjectDetection, synthesize one dict of class_labels/boxes per
        batch element."""
        _lowerCamelCase : Optional[Any] = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                _lowerCamelCase : Dict = []
                for i in range(self.model_tester.batch_size):
                    _lowerCamelCase : Dict = {}
                    _lowerCamelCase : Tuple = torch.ones(
                        size=(self.model_tester.n_targets,) , device=_UpperCamelCase , dtype=torch.long)
                    _lowerCamelCase : Any = torch.ones(
                        self.model_tester.n_targets , 4 , device=_UpperCamelCase , dtype=torch.float)
                    labels.append(_UpperCamelCase)
                _lowerCamelCase : Any = labels
        return inputs_dict

    def _SCREAMING_SNAKE_CASE ( self : int) ->Dict:
        """Set up the model tester and the config tester."""
        _lowerCamelCase : int = YolosModelTester(self)
        _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
        """Run the shared configuration tests."""
        self.config_tester.run_common_tests()

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
        """Intentionally skipped (YOLOS does not support inputs_embeds-style tests)."""
        pass

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple:
        """Check input embeddings are an nn.Module and output embeddings are
        None or an nn.Linear for every model class."""
        _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Dict = model_class(_UpperCamelCase)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            _lowerCamelCase : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear))

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
        """Check the forward signature starts with `pixel_values`."""
        _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Optional[Any] = model_class(_UpperCamelCase)
            _lowerCamelCase : Dict = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : Any = [*signature.parameters.keys()]
            _lowerCamelCase : Dict = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
        """Exercise the base-model shape check via the model tester."""
        _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
        """Check attention outputs: count, shape (with YOLOS' longer sequence
        length), and that attentions come last when everything is returned."""
        _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : List[str] = True
        # in YOLOS, the seq_len is different
        _lowerCamelCase : Dict = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            _lowerCamelCase : int = True
            _lowerCamelCase : List[str] = False
            _lowerCamelCase : int = True
            _lowerCamelCase : Union[str, Any] = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : str = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))
            _lowerCamelCase : Tuple = outputs.attentions
            self.assertEqual(len(_UpperCamelCase) , self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _lowerCamelCase : str = True
            _lowerCamelCase : int = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : int = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))
            _lowerCamelCase : Optional[Any] = outputs.attentions
            self.assertEqual(len(_UpperCamelCase) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            _lowerCamelCase : Dict = len(_UpperCamelCase)
            # Check attention is always last and order is fine
            _lowerCamelCase : str = True
            _lowerCamelCase : Dict = True
            _lowerCamelCase : Tuple = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : List[Any] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))
            _lowerCamelCase : int = 1
            self.assertEqual(out_len + added_hidden_states , len(_UpperCamelCase))
            _lowerCamelCase : str = outputs.attentions
            self.assertEqual(len(_UpperCamelCase) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )

    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
        """Check hidden-state outputs: layer count and per-layer shape, both via
        the call argument and via the config flag."""
        def check_hidden_states_output(_UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int):
            _lowerCamelCase : List[str] = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))
            _lowerCamelCase : int = outputs.hidden_states
            _lowerCamelCase : Optional[Any] = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase)
            # YOLOS has a different seq_length
            _lowerCamelCase : Any = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
        _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : List[Any] = True
            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : Any = True
            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
        """Exercise the object-detection head checks via the model tester."""
        _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*_UpperCamelCase)

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
        """Smoke-test loading the first published YOLOS checkpoint."""
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : List[Any] = YolosModel.from_pretrained(_UpperCamelCase)
            self.assertIsNotNone(_UpperCamelCase)
def prepare_img():
    """Load the COCO test fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    """Slow integration test checking YOLOS-small object-detection outputs
    against reference logits/boxes and post-processed detections."""

    @cached_property
    def default_image_processor(self):
        """Image processor matching the hustvl/yolos-small checkpoint
        (None when vision dependencies are missing)."""
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self) -> None:
        """End-to-end check: raw head outputs and post-processed detections."""
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]  # COCO ids: remote, remote, cat, couch, cat
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 15 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export ``model`` to ONNX at ``output_path``, creating parent directories.

    Thin wrapper over ``torch.onnx.export``; the original definition repeated one
    parameter name eight times (a SyntaxError), so the names are reconstructed from
    the keyword arguments used at the call site in ``convert_models``.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Export the VAE decoder of a `diffusers` checkpoint to ONNX.

    Args:
        model_path: local directory or Hub id of the `diffusers` checkpoint.
        output_path: directory that receives ``vae_decoder/model.onnx``.
        opset: ONNX operator-set version to target.
        fp16: export in float16 (requires CUDA).

    Raises:
        ValueError: if ``fp16`` is requested without an available CUDA device.
    """
    # `torch.floataa` in the original does not exist; fp16/fp32 is the intended pair.
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part (export must trace `decode`, not the full VAE)
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            # Dummy latent; spatial size 25x25 is arbitrary — height/width are dynamic axes.
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    # CLI: convert a Stable Diffusion VAE decoder to ONNX.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    # argparse stores `--fp16` as `args.fp16`; the original read a nonexistent attribute.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 15 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# Modalities a tool may declare; the ToolTesterMixin methods check against `authorized_types`.
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    """Build one dummy input per requested modality.

    ``input_types`` may contain "text", "image", "audio", or a nested list of
    those (handled recursively). Raises ``ValueError`` for anything else.
    The original body iterated an undefined name and tested
    ``isinstance(x, x)``; the tool-test call sites ground this reconstruction.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            # Nested modality list -> nested input list.
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs):
    """Map each produced output object to its agent modality name.

    str/AgentText -> "text", PIL image/AgentImage -> "image",
    torch.Tensor/AgentAudio -> "audio"; anything else raises ``ValueError``.
    The original body classified the whole ``outputs`` list instead of each item.
    """
    detected_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            detected_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            detected_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            detected_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return detected_types
@is_tool_test
class __snake_case:
    """Generic checks for agent tools; the host test class must set ``self.tool``.

    The original collapsed all five test methods onto one name (so only the last
    survived) and referenced undefined identifiers; names are restored from the
    internal assertions and the upstream ToolTesterMixin.
    """

    def test_inputs_outputs(self):
        # Declared input/output modalities must come from `authorized_types`.
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        # The tool must accept generated dummy inputs and return the declared types.
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        # Each output must be an instance of the agent type mapped from its modality.
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        # Wrapping raw inputs in their agent types must still be accepted by the tool.
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 15 | from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def A__(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Return the built-in voltage (in volts) of a p-n junction at T = 300 K.

    V_bi = (k*T/q) * ln(N_d * N_a / n_i**2)

    Args:
        donor_conc: donor concentration N_d (> n_i).
        acceptor_conc: acceptor concentration N_a (> n_i).
        intrinsic_conc: intrinsic carrier concentration n_i (> 0).

    Raises:
        ValueError: for non-positive concentrations, or when a doping
            concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        # Division by the elementary charge (in eV) converts joules to volts.
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
import doctest
from collections import deque
import numpy as np
class __snake_case:
    """Circular (cyclic) convolution of two fixed demo signals.

    The original method body was corrupted (every local collapsed onto a single
    clobbering name, plus references to undefined identifiers); the algorithm is
    restored: build the circulant matrix of the second signal via rotated deques,
    then multiply by the first signal.
    """

    def __init__(self) -> None:
        # Demo signals; their circular convolution is [10, 10, 6, 14].
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def _SCREAMING_SNAKE_CASE(self) -> list[float]:
        """Return the circular convolution of the two signals, rounded to 2 dp."""
        length_first = len(self.first_signal)
        length_second = len(self.second_signal)
        max_length = max(length_first, length_second)
        # max_length x max_length circulant matrix of the second signal.
        matrix = [[0] * max_length for _ in range(max_length)]
        # Zero-pad the shorter signal so both have max_length samples.
        if length_first < length_second:
            self.first_signal += [0] * (max_length - length_first)
        elif length_first > length_second:
            self.second_signal += [0] * (max_length - length_second)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)  # right-rotate: row i holds h[(j - i) mod N]
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # y[j] = sum_i x[i] * h[(j - i) mod N]
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        return [round(value, 2) for value in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 15 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset row and record its characters-per-token ratio.

    Relies on the module-level `tokenizer` created in the main script; `ds.map`
    below references this function as `tokenize`, which grounds the name. The
    original defined it under a different name and read an undefined variable.
    """
    output = {}
    # Never truncate: the ratio must reflect the full document.
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
# Module-level tokenizer used by `tokenize` (datasets.map workers re-import this module).
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    # Drop all metadata columns; only the tokenized fields are pushed.
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 15 | 1 |
from scipy.stats import spearmanr
import datasets
lowerCAmelCase : Dict ="\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
lowerCAmelCase : Optional[Any] ="\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
lowerCAmelCase : List[str] =r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    """Spearman rank-order correlation metric (wraps ``scipy.stats.spearmanr``).

    The ``datasets.Metric`` framework requires the hook names ``_info`` and
    ``_compute``; the original collapsed both onto one name and repeated one
    parameter name three times (a SyntaxError).
    NOTE(review): the decorator reads ``_DESCRIPTION``/``_KWARGS_DESCRIPTION``,
    but the module assigns those strings to a throwaway name — confirm upstream.
    """

    def _info(self):
        # Schema declaration consumed by the `datasets` framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # spearmanr is symmetric in its two inputs.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 15 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast, dummy-weight checks for `IFPipeline`.

    Restored from the imported mixins: the original class statement referenced
    undefined base names, all class attributes shared one name, and
    `get_dummy_inputs` repeated a parameter name (a SyntaxError).
    """

    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Tiny randomly initialised sub-models provided by IFPipelineTesterMixin.
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support per-device generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Loosened tolerance: fp16 introduces numerical noise.
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class __snake_case(unittest.TestCase):
    """End-to-end GPU tests for the DeepFloyd-IF cascades (base, img2img, inpainting).

    Reconstructed: the original collapsed `pipe_1`/`pipe_2` into one name, gave
    every method the same name, repeated parameter names (SyntaxErrors), and
    referenced the nonexistent `torch.floataa`. Helper-method names are grounded
    by the original call sites (`_test_if`, `_test_if_imgaimg`,
    `_test_if_inpainting`).
    """

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImgaImgPipeline(**pipe_1.components)
        pipe_2 = IFImgaImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_imgaimg(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1: base 64x64 text-to-image
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2: super-resolution to 256x256
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_imgaimg(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1: 64x64 img2img
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2: img2img super-resolution
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1: 64x64 inpainting
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2: inpainting super-resolution
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    """Reset CUDA memory accounting so `torch.cuda.max_memory_allocated()` reflects only the next pipeline run.

    The test methods call this helper as `_start_torch_memory_measurement`,
    which grounds the name (the original defined it under a throwaway name).
    Requires a CUDA-enabled torch build.
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 15 | 1 |
import argparse
import struct
import unittest
class SHAaaa:
    """Pure-Python SHA-256 (FIPS 180-4); the hex digest is exposed as ``.hash``.

    Restored under the name the rest of the file uses (`SHAaaa`): the original
    class gave three different methods one shared name, repeated `ror`'s
    parameter name (a SyntaxError), and never bound the instance attributes its
    own `__init__` reads.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values H0..H7 (fractional parts of the square roots of the first 8 primes).
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Round constants K0..K63 (fractional parts of the cube roots of the first 64 primes).
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message: 0x80, zeros to 56 mod 64, then the 64-bit big-endian bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over each 64-byte block and set ``self.hash``."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the block into 16 big-endian 32-bit words, then extend to 64.
            words = list(struct.unpack(">16L", block))
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # Message schedule: sigma0/sigma1 expansion of earlier words.
                    sa = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x1_0000_0000
                # Compression
                big_sa = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                tempa = (
                    h + big_sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                big_sb = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (big_sb + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join(hex(value)[2:].zfill(8) for value in self.hashes)

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value."""
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __snake_case(unittest.TestCase):
    """Regression test: the pure-Python digest must match hashlib's SHA-256."""

    def test_match_hashes(self) -> None:
        # `hashlib.shaaaa` in the original does not exist; sha256 is the intended
        # reference. Method renamed with a `test_` prefix for unittest discovery.
        import hashlib

        data = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(data).hash, hashlib.sha256(data).hexdigest())
def main():
    """CLI entry point: hash a string (or a file's contents) with SHA-256.

    The guard below calls `main()`, which grounds the name; the original body
    also read an undefined variable for the string input.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
| 15 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration for a Swin Transformer model.

    Fixes applied: the base classes were the undefined name `__lowerCAmelCase`
    (restored from the imports above); the two class attributes shared one
    name, shadowing each other (restored to the `PretrainedConfig` contract
    names); `__init__` declared every parameter as `_UpperCamelCase`, a
    SyntaxError (restored to the attribute names the body assigns).
    """

    model_type = 'swin'
    # Expose canonical config attribute names via their Swin-specific fields.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.0_2,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) ->None:
        """Store the architecture hyper-parameters and derive backbone metadata."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
class __snake_case ( OnnxConfig ):
    """ONNX export configuration for Swin: input axes spec and validation tolerance.

    Fixes applied: the base class was the undefined name `__lowerCAmelCase`
    (restored to `OnnxConfig` from the imports above); both properties shared
    one name, so the second shadowed the first (restored to the `OnnxConfig`
    property names the exporter calls).
    """

    # Minimum torch version known to export this architecture correctly.
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs(self) ->Mapping[str, Mapping[int, str]]:
        """Named model inputs with their dynamic (symbolic) axes."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])

    @property
    def atol_for_validation(self) ->float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1E-4
def A__ ( profit , weight , max_weight ):
    """Greedy fractional-knapsack: return the maximum profit obtainable within
    ``max_weight``, taking items (or fractions of the last item) in decreasing
    profit-per-weight order.

    Fixes applied: all three parameters were named `__A` (a SyntaxError) and
    the "item used" flag was assigned to a throwaway instead of marking the
    item in `profit_by_weight`.
    """
    if len(profit ) != len(weight ):
        raise ValueError("""The length of profit and weight must be same.""" )
    if max_weight <= 0:
        raise ValueError("""max_weight must greater than zero.""" )
    if any(p < 0 for p in profit ):
        raise ValueError("""Profit can not be negative.""" )
    if any(w < 0 for w in weight ):
        raise ValueError("""Weight can not be negative.""" )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        # mark this item as consumed so .index() skips it next time
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    # Fix: the three inputs were bound to a single shadowed throwaway name and
    # the call referenced the undefined `calc_profit`; the routine is `A__`.
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
    # Function Call — print the result so the interactive run shows the gain.
    print(A__(profit, weight, max_weight))
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( SchedulerCommonTest ):
    """Tests for `EulerDiscreteScheduler`.

    Fixes applied: the base class was the undefined `__lowerCAmelCase`
    (restored to `SchedulerCommonTest`, which supplies `check_over_configs`,
    `dummy_model` and `dummy_sample_deter`); both class attributes shared one
    name though the methods read `self.scheduler_classes` and
    `self.num_inference_steps`; all methods shared one name (shadowing) and
    read the undefined `_UpperCamelCase` — restored to the loop variables and
    locals the surviving code clearly intends.
    """

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Base scheduler config; keyword overrides are merged in."""
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0_0_0_1,
            """beta_end""": 0.0_2,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Full denoising loop on CPU-seeded noise; checks regression values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample , t)
            model_output = model(sample , t)
            output = scheduler.step(model_output , t , sample , generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3

    def test_full_loop_with_v_prediction(self):
        """Same loop with the v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample , t)
            model_output = model(sample , t)
            output = scheduler.step(model_output , t , sample , generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
        assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3

    def test_full_loop_device(self):
        """Timesteps placed on the accelerator device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps , device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t)
            model_output = model(sample , t)
            output = scheduler.step(model_output , t , sample , generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device using the Karras sigma schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps , device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t)
            model_output = model(sample , t)
            output = scheduler.step(model_output , t , sample , generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
        assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __snake_case ( TestCasePlus ):
    """Slow integration test: fine-tune a tiny bert2bert encoder-decoder with SeqaSeqTrainer.

    Fixes applied: the base class was the undefined `__lowerCAmelCase`
    (restored to `TestCasePlus`, which supplies `get_auto_remove_tmp_dir`);
    the config assignments were bound to throwaway names though the body reads
    `bertabert.config...`; the tokenizer/trainer keyword arguments read the
    undefined `_UpperCamelCase` — restored to the intended locals/flags.
    """

    @slow
    @require_torch
    def test_finetune_bertabert(self) ->None:
        """End-to-end smoke test on a 32-sample CNN/DailyMail slice."""
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""")
        tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""")

        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128

        train_dataset = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""")
        val_dataset = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=True , max_length=512)
            outputs = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=True , max_length=128)
            batch["""input_ids"""] = inputs.input_ids
            batch["""attention_mask"""] = inputs.attention_mask
            batch["""decoder_input_ids"""] = outputs.input_ids
            batch["""labels"""] = outputs.input_ids.copy()
            # Mask padding positions out of the loss with -100.
            batch["""labels"""] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            batch["""decoder_attention_mask"""] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["""article""", """highlights"""] , )
        train_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["""article""", """highlights"""] , )
        val_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="""steps""" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )

        # start training
        trainer.train()
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ={
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( PreTrainedTokenizer ):
    """Character-level tokenizer backed by a JSON vocabulary (MGP-STR style).

    Fixes applied: the base class was the undefined `__lowerCAmelCase`
    (restored to `PreTrainedTokenizer` from the imports above); the class
    attributes and all six methods shared single names, so only the last of
    each survived — restored to the `PreTrainedTokenizer` contract names; in
    `_tokenize` the accumulator was assigned to a throwaway (leaving
    `char_tokens` undefined) and the loop extended with the whole input
    instead of each element.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs) ->None:
        """Load the JSON vocab and build the id -> token decoder map."""
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )

        with open(vocab_file , encoding="""utf-8""") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Base vocab merged with any added tokens."""
        return dict(self.vocab , **self.added_tokens_encoder)

    def _tokenize(self , _UpperCamelCase):
        """Split the input text into single characters."""
        char_tokens = []
        for s in _UpperCamelCase:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self , token):
        """Map a token to its id, falling back to the unk token's id."""
        return self.vocab.get(token , self.vocab.get(self.unk_token))

    def _convert_id_to_token(self , index):
        """Map an id back to its token (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) ->Tuple[str]:
        """Write the vocabulary JSON into `save_directory`; returns the file path."""
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])

        with open(vocab_file , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False) + """\n""")

        return (vocab_file,)
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# Fix: these five constants were all bound to the same shadowed throwaway name
# even though the code below references INIT_COMMON, END_COMMON,
# DECODER_PATTERNS, REMAINING_PATTERNS and KEYS_TO_IGNORE.

# Rename patterns applied to every TF variable name (tf -> hf).
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
# Patterns applied after the block-specific ones.
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]
# Decoder-specific renames (self- and cross-attention projections).
DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)
# Encoder/embedding renames for all non-decoder weights.
REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)
# TF bias variables with no counterpart in the HF checkpoint.
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def A__ ( k , patterns ):
    """Apply each (tf_name, hf_name) substitution in `patterns` to key `k`.

    Fixes applied: both parameters were named `__A` (a SyntaxError) and the
    `str.replace` result was discarded into a throwaway, so the original key
    was returned unchanged.
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def A__ ( tf_weights , config_update ):
    """Convert a dict of TF BigBirdPegasus weights into a HF torch model.

    Fixes applied: both parameters were named `__A` (a SyntaxError) and the
    per-key tensor writes were discarded into throwaways instead of filling
    the `mapping` state dict.
    """
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
    for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        # dense/attention projections are stored transposed in the TF checkpoint
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    # the shared TF position table feeds both encoder and decoder embeddings
    mapping["""model.encoder.embed_positions.weight"""] = mapping["""model.embed_positions.weight"""]
    mapping["""model.decoder.embed_positions.weight"""] = mapping.pop("""model.embed_positions.weight""" )
    missing , extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            """final_logits_bias""",
            """model.encoder.embed_tokens.weight""",
            """model.decoder.embed_tokens.weight""",
            """lm_head.weight""",
        ]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def A__ ( path ):
    """Load every non-ignored variable of a TF checkpoint into a name->array dict.

    Fixes applied: `tf.train.load_variable` was called with the path for both
    arguments instead of (path, variable name), and each loaded array was
    discarded into a throwaway instead of stored under its name.
    """
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["""global_step"""]
    for name, shape in tqdm(init_vars , desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def A__ ( ckpt_path , save_dir , config_update ):
    """Convert a TF BigBirdPegasus checkpoint and save it in HF format.

    Fix applied: all three parameters were named `__A` (a SyntaxError).
    """
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    # Fix: the conversion entry point is named `A__` in this file; the
    # original call target `convert_bigbird_pegasus_ckpt_to_pytorch` was
    # undefined. Arguments are passed positionally.
    A__(args.tf_ckpt_path, args.save_dir, config_update)
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
    """Tests for the backbone feature/index alignment utilities and BackboneMixin.

    Fixes applied: the three test methods shared one name (shadowing each
    other) and read the undefined `_UpperCamelCase` in place of `None`,
    `stage_names`, `ValueError` and the unpacked results; the mixin attribute
    writes were discarded into throwaways.
    """

    def test_get_aligned_output_features_output_indices(self) ->None:
        stage_names = ["""a""", """b""", """c"""]

        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names)
        self.assertEqual(out_features , ["""c"""])
        self.assertEqual(out_indices , [2])

        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(["""a""", """c"""] , None , stage_names)
        self.assertEqual(out_features , ["""a""", """c"""])
        self.assertEqual(out_indices , [0, 2])

        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names)
        self.assertEqual(out_features , ["""a""", """c"""])
        self.assertEqual(out_indices , [0, 2])

        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names)
        self.assertEqual(out_features , ["""a""", """c"""])
        self.assertEqual(out_indices , [-3, -1])

    def test_verify_out_features_out_indices(self) ->None:
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None , 0 , ["""a""", """b"""])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None , (0, 1) , ["""a"""])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])

        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])

    def test_backbone_mixin(self) ->None:
        backbone = BackboneMixin()

        backbone.stage_names = ["""a""", """b""", """c"""]
        backbone._out_features = ["""a""", """c"""]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["""a""", """b"""]
        self.assertEqual(backbone.out_features , ["""a""", """b"""])
        self.assertEqual(backbone.out_indices , [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [-3, -1])
import warnings
from .generation import TFGenerationMixin
class __snake_case ( TFGenerationMixin ):
    """Deprecated import-path shim for `TFGenerationMixin`.

    Fixes applied: the base class and the warning category were the undefined
    name `__lowerCAmelCase`; the base is restored from the import above and
    the category to `FutureWarning` (the standard category for scheduled
    removals).
    """

    # Warn once at class-definition (module import) time.
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , FutureWarning , )
import math
def A__ ( number ):
    """Return True if ``number`` is a prime, False otherwise.

    Fix applied: the guard called ``isinstance(number, number)`` (the type
    argument had been clobbered), which raises TypeError for every input;
    restored to ``isinstance(number, int)``.
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    # only odd divisors up to sqrt(number) need checking
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def A__ ( value , factor=1 , **kwargs ):
    """Return the first prime >= ``factor * value`` (searching downward when
    ``desc=True`` is passed).

    Fixes applied: all parameters were named `__A` (a SyntaxError) and the
    seed/sentinel assignments were discarded into throwaways, leaving
    `factor`, `first_value_val`, `is_prime` and the recursive call undefined.
    Primality is checked by a local helper so the block is self-contained.
    """

    def _is_prime(number):
        # Trial division by odd candidates up to sqrt(number).
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0:
            return False
        i = 3
        while i * i <= number:
            if number % i == 0:
                return False
            i += 2
        return True

    value = factor * value
    first_value_val = value

    while not _is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        # If the search wrapped back to the start, restart one step higher.
        if value == first_value_val:
            return A__(value + 1 , **kwargs )
    return value
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
# Fix: these three docstring constants were all bound to the same shadowed
# throwaway name even though the decorator and `_info` below reference
# _CITATION, _DESCRIPTION and _KWARGS_DESCRIPTION.
_CITATION ="\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_DESCRIPTION ="\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION ="\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    """ROUGE metric wrapper around google-research's `rouge_score` package.

    Fixes applied: the two methods shared one name, so the `datasets.Metric`
    hooks `_info`/`_compute` were never defined (restored); `_compute`
    declared five parameters all named `_UpperCamelCase`, a SyntaxError
    (restored to the documented keyword names).
    """

    def _info(self):
        """Metric metadata: input features, citation and reference links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence"""),
                    """references""": datasets.Value("""string""" , id="""sequence"""),
                }) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
                """https://en.wikipedia.org/wiki/ROUGE_(metric)""",
                """https://github.com/google-research/google-research/tree/master/rouge""",
            ] , )

    def _compute(self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False):
        """Score each (reference, prediction) pair; aggregate when requested."""
        if rouge_types is None:
            rouge_types = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references , predictions):
            score = scorer.score(ref , pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            # transpose the per-pair scores into per-rouge-type lists
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 15 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
    '''Image-classification pipeline: image in, top-k {score, label} dicts out (PT or TF).'''
    def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple:
        """Require the vision backend and restrict to image-classification model classes."""
        super().__init__(*_UpperCamelCase , **_UpperCamelCase)
        requires_backends(self , """vision""")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]:
        """Split call kwargs into (preprocess, forward, postprocess) parameter dicts.

        NOTE(review): the body builds a dict and assigns `top_k` into a throwaway local,
        then returns the unbound name `postprocess_params` — obfuscation damage; the
        intent is clearly `postprocess_params["top_k"] = top_k`. Confirm upstream.
        """
        _lowerCamelCase : Optional[int] = {}
        if top_k is not None:
            _lowerCamelCase : str = top_k
        return {}, {}, postprocess_params
    def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict:
        """Classify one image or a batch; accepts URLs, paths or PIL images."""
        return super().__call__(_UpperCamelCase , **_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str:
        """Preprocess: load the image and convert it to framework tensors."""
        _lowerCamelCase : Tuple = load_image(_UpperCamelCase)
        _lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
        return model_inputs
    def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]:
        """Forward: run the underlying model on the processed inputs."""
        _lowerCamelCase : Any = self.model(**_UpperCamelCase)
        return model_outputs
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str:
        """Postprocess: softmax the logits and return the top-k {score, label} dicts."""
        # Never ask for more classes than the model has.
        if top_k > self.model.config.num_labels:
            _lowerCamelCase : Union[str, Any] = self.model.config.num_labels
        if self.framework == "pt":
            _lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0]
            _lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase)
        elif self.framework == "tf":
            # stable_softmax avoids TF's XLA softmax numerical quirks.
            _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0]
            _lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase)
            _lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        _lowerCamelCase : str = scores.tolist()
        _lowerCamelCase : str = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
| 15 | 1 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=__lowerCAmelCase ):
    """Import-time stand-in used when the `onnx` backend is unavailable.

    Instantiating it, or calling either class-level hook, immediately reports
    the missing dependency via `requires_backends`.
    """

    _snake_case = ["onnx"]

    def __init__( self : Optional[Any] , *args : Tuple , **kwargs : int) ->int:
        """Reject instantiation: the `onnx` backend is not installed."""
        requires_backends(self , ["onnx"])

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *args : Optional[Any] , **kwargs : Optional[int]) ->int:
        """Class-level hook; reports the missing `onnx` dependency."""
        requires_backends(cls , ["onnx"])

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : List[Any] , *args : List[str] , **kwargs : List[Any]) ->List[str]:
        """Class-level hook (shadows the previous definition, as in the original)."""
        requires_backends(cls , ["onnx"])
| 15 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    '''Unit tests for `MgpstrProcessor` (character tokenizer + ViT image processor).'''
    _snake_case = ViTImageProcessor if is_vision_available() else None
    @property
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        """Image-processor config dict from the shared tester helper.

        NOTE(review): `self.image_processor_tester` is never assigned in this class as
        shown — confirm which fixture was intended upstream.
        """
        return self.image_processor_tester.prepare_image_processor_dict()
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
        """setUp: write a char vocab and a ViT image-processor config into a temp dir."""
        _lowerCamelCase : Union[str, Any] = (3, 32, 128)
        _lowerCamelCase : str = tempfile.mkdtemp()
        # fmt: off
        _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
        _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(_UpperCamelCase) + """\n""")
        _lowerCamelCase : Any = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
        with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
            json.dump(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
        """Load the char tokenizer saved by setUp (kwargs forwarded)."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
        """Load the ViT image processor saved by setUp (kwargs forwarded)."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """tearDown: remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
        """Build one random uint8 RGB PIL image (channels-first array moved to HWC)."""
        _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
        _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
        return image_input
    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """Round-trip save/load of the processor preserves tokenizer vocab and image config."""
        _lowerCamelCase : List[str] = self.get_tokenizer()
        _lowerCamelCase : Tuple = self.get_image_processor()
        _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        processor.save_pretrained(self.tmpdirname)
        _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        """from_pretrained with extra kwargs overrides special tokens / image options."""
        _lowerCamelCase : Dict = self.get_tokenizer()
        _lowerCamelCase : Optional[Any] = self.get_image_processor()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        processor.save_pretrained(self.tmpdirname)
        _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
        _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
        _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
        """Processor image path matches calling the image processor directly."""
        _lowerCamelCase : int = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : List[str] = self.prepare_image_inputs()
        _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
        _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
        """Processor text path matches calling the tokenizer directly."""
        _lowerCamelCase : List[Any] = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Optional[int] = """test"""
        _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
        _lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """Text + image call yields pixel_values and labels; empty call raises."""
        _lowerCamelCase : Union[str, Any] = self.get_image_processor()
        _lowerCamelCase : List[Any] = self.get_tokenizer()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Any = """test"""
        _lowerCamelCase : List[str] = self.prepare_image_inputs()
        _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
        self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
        # test if it raises when no input is passed
        with pytest.raises(_UpperCamelCase):
            processor()
    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """char_decode matches tokenizer.batch_decode with spaces stripped."""
        _lowerCamelCase : Union[str, Any] = self.get_image_processor()
        _lowerCamelCase : List[str] = self.get_tokenizer()
        _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
        _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
        _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
        self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
        """With no text, output keys equal the processor's model_input_names."""
        _lowerCamelCase : Dict = self.get_image_processor()
        _lowerCamelCase : str = self.get_tokenizer()
        _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : int = None
        _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
        _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        """batch_decode over (char, bpe, wordpiece) logits returns all expected keys."""
        _lowerCamelCase : List[str] = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Any = torch.randn(1 , 27 , 38)
        _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
        _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
        _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
# Checkpoint sizes shipped by DialoGPT, and the lm-head weight key rename pair
# (decoder-style key -> plain `lm_head.weight`) applied during conversion.
lowerCAmelCase : Optional[Any] =["small", "medium", "large"]
lowerCAmelCase : Dict ="lm_head.decoder.weight"
lowerCAmelCase : str ="lm_head.weight"
def A__ ( __A , __A ):
    '''Load a DialoGPT checkpoint, rename the lm-head weight key, and re-save it.

    Positional args look like (checkpoint_path, pytorch_dump_folder_path).
    NOTE(review): the key popped and the key it is reassigned to were lost in
    obfuscation (the constants above suggest OLD_KEY/NEW_KEY) — confirm upstream.
    '''
    _lowerCamelCase : List[Any] = torch.load(__A )
    _lowerCamelCase : Tuple = d.pop(__A )
    os.makedirs(__A , exist_ok=__A )
    # Saved under the standard WEIGHTS_NAME inside the dump folder.
    torch.save(__A , os.path.join(__A , __A ) )
if __name__ == "__main__":
    # Convert every shipped DialoGPT size found under --dialogpt_path.
    # NOTE(review): the loop body reads `parser`, `args`, `DIALOGPT_MODELS`,
    # `checkpoint_path`, `pytorch_dump_folder_path` and `convert_dialogpt_checkpoint`,
    # but the obfuscated assignments bind `lowerCAmelCase` instead — confirm upstream.
    lowerCAmelCase : Dict =argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    lowerCAmelCase : Optional[int] =parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        lowerCAmelCase : Union[str, Any] =os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        lowerCAmelCase : str =F"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 15 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
    """Parse the TPU launcher's command line.

    Returns an ``argparse.Namespace`` with:
      * ``num_cores`` (int): number of TPU cores to use (1 or 8);
      * ``training_script`` (str): path of the training script to spawn;
      * ``training_script_args`` (list[str]): everything after the script path,
        forwarded verbatim to the training script.

    Fixes the obfuscated ``type=__A`` / ``nargs=__A`` arguments, which referenced
    an undefined name: restored to ``int`` / ``str`` / ``argparse.REMAINDER``.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program: REMAINDER swallows every trailing token
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def A__ ( ):
    '''Entry point: import the user's training script as a module and spawn its
    `_mp_fn` across TPU cores via `torch_xla`'s multiprocessing helper.

    NOTE(review): `parse_args()` is not bound under that name in this file as shown
    (the parser function above is also named `A__`), and the locals referenced below
    (`args`, `script_fpath`, `mod`) are bound to `_lowerCamelCase` by obfuscation —
    confirm upstream.
    '''
    _lowerCamelCase : List[str] = parse_args()
    # Import training_script as a module.
    _lowerCamelCase : List[Any] = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    _lowerCamelCase : Optional[Any] = script_fpath.stem
    _lowerCamelCase : Dict = importlib.import_module(__A )
    # Patch sys.argv so the spawned script sees its own args plus --tpu_num_cores.
    _lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    main()  # NOTE(review): `main` is not bound in this file as shown (defs are named `A__`).
| 15 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Module-wide RNG used by the float-list factory below when no rng is passed.
# NOTE(review): the factory references `global_rng`; this obfuscated binding differs — confirm.
lowerCAmelCase : List[str] =random.Random()
def A__ ( shape , scale=1.0 , rng=None , name=None ):
    """Build a ``shape[0] x shape[1]`` nested list of random floats in ``[0, scale)``.

    Args:
        shape: (rows, cols) pair.
        scale: multiplier applied to each ``rng.random()`` draw.
        rng: optional ``random.Random``; falls back to the module-level ``global_rng``.
        name: unused; kept for signature compatibility with callers.

    Fixes the obfuscated signature whose four parameters were all named ``__A``
    (a SyntaxError) while the body read ``shape``/``scale``/``rng``.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            # Row-major fill: one draw per cell, scaled.
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase ):
    '''Config holder / input factory for the Whisper feature-extractor tests below.'''
    def __init__( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : str=7 , _UpperCamelCase : Dict=400 , _UpperCamelCase : str=2000 , _UpperCamelCase : str=10 , _UpperCamelCase : Any=160 , _UpperCamelCase : List[Any]=8 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : List[Any]=4000 , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Dict=True , ) ->List[Any]:
        """Record the feature-extractor hyper-parameters shared by the test suite."""
        _lowerCamelCase : str = parent
        _lowerCamelCase : str = batch_size
        _lowerCamelCase : Optional[int] = min_seq_length
        _lowerCamelCase : Optional[Any] = max_seq_length
        # Uniform length increment so batched inputs have strictly increasing lengths.
        _lowerCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        _lowerCamelCase : Optional[int] = padding_value
        _lowerCamelCase : Any = sampling_rate
        _lowerCamelCase : Optional[Any] = return_attention_mask
        _lowerCamelCase : str = do_normalize
        _lowerCamelCase : int = feature_size
        _lowerCamelCase : Dict = chunk_length
        _lowerCamelCase : Optional[int] = hop_length
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
        """Return the kwargs dict for constructing a WhisperFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Dict=False , _UpperCamelCase : int=False) ->Union[str, Any]:
        """Build a batch of float waveforms (equal or increasing lengths; optionally numpy arrays)."""
        def _flatten(_UpperCamelCase : Optional[Any]):
            return list(itertools.chain(*_UpperCamelCase))
        if equal_length:
            _lowerCamelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            _lowerCamelCase : Union[str, Any] = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
            ]
        if numpify:
            _lowerCamelCase : Dict = [np.asarray(_UpperCamelCase) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
    '''Behavioral tests for `WhisperFeatureExtractor` (save/load, padding, normalization).'''
    _snake_case = WhisperFeatureExtractor if is_speech_available() else None
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
        """setUp: attach the shared config tester."""
        _lowerCamelCase : int = WhisperFeatureExtractionTester(self)
    def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]:
        """save_pretrained/from_pretrained round-trip preserves config and mel filters."""
        _lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : List[str] = feat_extract_first.save_pretrained(_UpperCamelCase)[0]
            check_json_file_has_correct_format(_UpperCamelCase)
            _lowerCamelCase : Tuple = self.feature_extraction_class.from_pretrained(_UpperCamelCase)
        _lowerCamelCase : Tuple = feat_extract_first.to_dict()
        _lowerCamelCase : Any = feat_extract_second.to_dict()
        _lowerCamelCase : Optional[Any] = feat_extract_first.mel_filters
        _lowerCamelCase : Optional[int] = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase))
        self.assertEqual(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
        """to_json_file/from_json_file round-trip preserves config and mel filters."""
        _lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : Optional[Any] = os.path.join(_UpperCamelCase , """feat_extract.json""")
            feat_extract_first.to_json_file(_UpperCamelCase)
            _lowerCamelCase : List[str] = self.feature_extraction_class.from_json_file(_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = feat_extract_first.to_dict()
        _lowerCamelCase : Dict = feat_extract_second.to_dict()
        _lowerCamelCase : List[Any] = feat_extract_first.mel_filters
        _lowerCamelCase : Optional[Any] = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase))
        self.assertEqual(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
        """Feature extraction: shapes, list-vs-numpy parity, batching, truncation."""
        _lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        _lowerCamelCase : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
        _lowerCamelCase : List[Any] = [np.asarray(_UpperCamelCase) for speech_input in speech_inputs]
        # Test feature size
        _lowerCamelCase : Tuple = feature_extractor(_UpperCamelCase , padding="""max_length""" , return_tensors="""np""").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        _lowerCamelCase : Tuple = feature_extractor(speech_inputs[0] , return_tensors="""np""").input_features
        _lowerCamelCase : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""").input_features
        self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3))
        # Test batched
        _lowerCamelCase : Dict = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
        _lowerCamelCase : Optional[Any] = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
        for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase):
            self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3))
        # Test 2-D numpy arrays are batched.
        _lowerCamelCase : Any = [floats_list((1, x))[0] for x in (800, 800, 800)]
        _lowerCamelCase : Tuple = np.asarray(_UpperCamelCase)
        _lowerCamelCase : List[str] = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
        _lowerCamelCase : List[str] = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
        for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase):
            self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3))
        # Test truncation required
        _lowerCamelCase : Union[str, Any] = [floats_list((1, x))[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200)]
        _lowerCamelCase : Optional[int] = [np.asarray(_UpperCamelCase) for speech_input in speech_inputs]
        _lowerCamelCase : List[str] = [x[: feature_extractor.n_samples] for x in speech_inputs]
        _lowerCamelCase : int = [np.asarray(_UpperCamelCase) for speech_input in speech_inputs_truncated]
        _lowerCamelCase : Tuple = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
        _lowerCamelCase : Dict = feature_extractor(_UpperCamelCase , return_tensors="""np""").input_features
        for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase):
            self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3))
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
        """Padding double-precision inputs keeps float32 (np) / float32 (pt) dtypes."""
        import torch
        _lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        _lowerCamelCase : int = np.random.rand(100 , 32).astype(np.floataa)
        _lowerCamelCase : Tuple = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            _lowerCamelCase : List[str] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""")
            self.assertTrue(np_processed.input_features.dtype == np.floataa)
            _lowerCamelCase : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""")
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[Any]) ->Union[str, Any]:
        """Load `num_samples` decoded waveforms from the dummy LibriSpeech split."""
        _lowerCamelCase : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""")
        # automatic decoding with librispeech
        _lowerCamelCase : Any = ds.sort("""id""").select(range(_UpperCamelCase))[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
        """Integration: log-mel features match golden values for one LibriSpeech sample."""
        _lowerCamelCase : Optional[int] = torch.tensor(
            [
                0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
                0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
                0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
                -0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
            ])
        # fmt: on
        _lowerCamelCase : Union[str, Any] = self._load_datasamples(1)
        _lowerCamelCase : Dict = WhisperFeatureExtractor()
        _lowerCamelCase : Any = feature_extractor(_UpperCamelCase , return_tensors="""pt""").input_features
        self.assertEqual(input_features.shape , (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , _UpperCamelCase , atol=1E-4))
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
        """zero_mean_unit_var_norm yields ~zero mean and ~unit variance."""
        _lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        _lowerCamelCase : List[str] = self._load_datasamples(1)[0]
        _lowerCamelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
        _lowerCamelCase : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_UpperCamelCase)[0]
        self.assertTrue(np.all(np.mean(_UpperCamelCase) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(_UpperCamelCase) - 1) < 1E-3))
def A__ ( n , prices ):
    """Exhaustive-recursion solution to the rod-cutting problem.

    Args:
        n: rod length (non-negative, at most ``len(prices)``).
        prices: ``prices[i - 1]`` is the price of a piece of length ``i``.

    Returns:
        The maximum revenue obtainable for a rod of length ``n``. O(2^n) time.

    Raises:
        ValueError: if ``n`` is negative or exceeds ``len(prices)``.

    Fixes the obfuscated signature (duplicate ``__A`` parameters left ``n`` and
    ``prices`` unbound) and inlines argument validation so the function is
    self-contained.
    """
    if n < 0:
        raise ValueError(f"""n must be greater than or equal to 0. Got n = {n}""")
    if n > len(prices):
        raise ValueError(
            """Each integral piece of rod must have a corresponding price. """
            f"""Got n = {n} but length of prices = {len(prices)}"""
        )
    if n == 0:
        return 0
    max_revue = float("""-inf""")
    for i in range(1, n + 1):
        # Best of: sell one piece of length i, then cut the remainder optimally.
        max_revue = max(max_revue, prices[i - 1] + A__(n - i, prices))
    return max_revue
def A__ ( n , prices ):
    """Top-down (memoized) dynamic-programming solution to rod cutting. O(n^2).

    Args:
        n: rod length (non-negative, at most ``len(prices)``).
        prices: ``prices[i - 1]`` is the price of a piece of length ``i``.

    Returns:
        The maximum revenue obtainable for a rod of length ``n``.

    Raises:
        ValueError: if ``n`` is negative or exceeds ``len(prices)``.

    Fixes the obfuscated version, whose duplicate ``__A`` parameters and calls
    to nonexistent sibling names (``_enforce_args``,
    ``_top_down_cut_rod_recursive``) made it unrunnable; the recursive kernel
    is now a self-contained nested helper.
    """
    if n < 0:
        raise ValueError(f"""n must be greater than or equal to 0. Got n = {n}""")
    if n > len(prices):
        raise ValueError(
            """Each integral piece of rod must have a corresponding price. """
            f"""Got n = {n} but length of prices = {len(prices)}"""
        )
    # max_rev[i] holds the best revenue for length i; -inf marks "not computed".
    max_rev = [float("""-inf""")] * (n + 1)

    def _cut_rod(length):
        # Memo hit: revenues are always >= 0 once computed.
        if max_rev[length] >= 0:
            return max_rev[length]
        if length == 0:
            return 0
        best = float("""-inf""")
        for i in range(1, length + 1):
            best = max(best, prices[i - 1] + _cut_rod(length - i))
        max_rev[length] = best
        return best

    return _cut_rod(n)
def A__ ( n , prices , max_rev ):
    """Memoized recursive kernel for top-down rod cutting.

    Args:
        n: remaining rod length.
        prices: ``prices[i - 1]`` is the price of a piece of length ``i``.
        max_rev: memo list of length >= n + 1, initialised to ``-inf``.

    Returns:
        Maximum revenue for length ``n``; the result is memoised in ``max_rev[n]``.

    Fixes the obfuscated signature whose triplicated ``__A`` parameters left
    ``n``/``prices``/``max_rev`` unbound, and restores the self-recursion.
    """
    if max_rev[n] >= 0:
        # Already computed — revenues are non-negative once filled in.
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("""-inf""")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + A__(n - i, prices, max_rev)
            )
        max_rev[n] = max_revenue
        return max_rev[n]
def A__ ( n , prices ):
    """Bottom-up dynamic-programming solution to rod cutting. O(n^2) time, O(n) space.

    Args:
        n: rod length (non-negative, at most ``len(prices)``).
        prices: ``prices[i - 1]`` is the price of a piece of length ``i``.

    Returns:
        The maximum revenue obtainable for a rod of length ``n``.

    Raises:
        ValueError: if ``n`` is negative or exceeds ``len(prices)``.

    Fixes the obfuscated signature (duplicate ``__A`` parameters) and inlines
    the argument validation previously delegated to a nonexistent helper.
    """
    if n < 0:
        raise ValueError(f"""n must be greater than or equal to 0. Got n = {n}""")
    if n > len(prices):
        raise ValueError(
            """Each integral piece of rod must have a corresponding price. """
            f"""Got n = {n} but length of prices = {len(prices)}"""
        )
    # max_rev[i] = best revenue for a rod of length i; length 0 earns 0.
    max_rev = [float("""-inf""")] * (n + 1)
    max_rev[0] = 0
    for i in range(1, n + 1):
        best_i = max_rev[i]
        for j in range(1, i + 1):
            # First piece of length j plus the optimal cut of the remainder.
            best_i = max(best_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = best_i
    return max_rev[n]
def A__ ( n , prices ):
    """Validate rod-cutting arguments.

    Args:
        n: requested rod length.
        prices: price table (one entry per integral piece length).

    Returns:
        None on valid input.

    Raises:
        ValueError: if ``n`` is negative, or if ``n`` exceeds ``len(prices)``
            (i.e. some piece length would have no price).

    Fixes the obfuscated signature whose duplicate ``__A`` parameters left
    ``n`` and ``prices`` unbound in the body.
    """
    if n < 0:
        _msg = f"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(_msg)
    if n > len(prices):
        _msg = (
            """Each integral piece of rod must have a corresponding price. """
            f"""Got n = {n} but length of prices = {len(prices)}"""
        )
        raise ValueError(_msg)
def A__ ( ):
    '''Smoke-test driver: all three rod-cutting implementations must agree on 36.

    NOTE(review): `top_down_cut_rod`, `bottom_up_cut_rod` and
    `naive_cut_rod_recursive` are not bound under those names in this file as
    shown (all defs are `A__`), and the locals referenced in the asserts are
    bound to `_lowerCamelCase` — confirm upstream.
    '''
    _lowerCamelCase : str = [6, 10, 12, 15, 20, 23]
    _lowerCamelCase : List[str] = len(__A )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    _lowerCamelCase : Tuple = 36
    _lowerCamelCase : Any = top_down_cut_rod(__A , __A )
    _lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A )
    _lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    main()  # NOTE(review): `main` is not bound in this file as shown (defs are named `A__`).
| 15 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( __A , __A , __A ):
    '''Convert a TensorFlow MobileBERT checkpoint into a PyTorch state dict and save it.

    Positional args (per the CLI below): tf_checkpoint_path, mobilebert_config_file,
    pytorch_dump_path.
    NOTE(review): the f-strings and `torch.save` read `config`/`model`/
    `pytorch_dump_path`, but obfuscation bound those values to `_lowerCamelCase` —
    confirm upstream.
    '''
    # Initialise PyTorch model
    _lowerCamelCase : List[Any] = MobileBertConfig.from_json_file(__A )
    print(F"""Building PyTorch model from configuration: {config}""" )
    _lowerCamelCase : Union[str, Any] = MobileBertForPreTraining(__A )
    # Load weights from tf checkpoint
    _lowerCamelCase : Dict = load_tf_weights_in_mobilebert(__A , __A , __A )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , __A )
if __name__ == "__main__":
    # CLI: three required paths (TF checkpoint, MobileBERT config JSON, PyTorch dump).
    lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    lowerCAmelCase : str =parser.parse_args()
    # NOTE(review): `parser`, `args` and `convert_tf_checkpoint_to_pytorch` are
    # pre-obfuscation names not bound in this file as shown — confirm upstream.
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 15 | from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Any = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE(self, file: str, key: int) -> bool:
    """Decrypt ``file`` line by line into 'decrypt.out'.

    Returns True on success, False on any OS-level failure.
    """
    assert isinstance(file, str) and isinstance(key, int)
    try:
        with open(file) as fin, open("decrypt.out", "w+") as fout:
            # actual decrypt-process
            for line in fin:
                fout.write(self.decrypt_string(line, key))
    except OSError:
        return False
    return True


decrypt_file = _SCREAMING_SNAKE_CASE
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 1 |
from collections.abc import Callable
import numpy as np
def A__(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Integrate y' = ode_func(x, y) over [x0, x_end] with Heun's method.

    Heun's (modified Euler) method: an explicit Euler predictor followed by a
    trapezoidal corrector. Returns the array of y values, y[0] == y0.
    Fix for obfuscation damage: the original declared ``__A`` five times
    (SyntaxError) and discarded every assignment into ``_lowerCamelCase``.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: one explicit Euler step
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        # corrector: average of the slopes at both ends of the step
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1]))
        )
        x += step_size
    return y
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 15 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case(__lowerCAmelCase):
    """Dataset reader that builds a datasets ``Dataset`` from text file(s).

    Fix for obfuscation damage: ``__init__`` declared ``_UpperCamelCase``
    eight times (SyntaxError) and ``self.builder`` was never assigned,
    although ``read()`` depends on it.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to the {split: paths} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset, streaming or map-style depending on config."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 15 | 1 |
def A__(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative ints as a '0b...' string.

    Raises ValueError when either input is negative. Fix for obfuscation
    damage: duplicate ``__A`` parameters and the never-assigned
    ``a_binary``/``b_binary`` locals.
    """
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 15 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A ):
'''simple docstring'''
if not sequence:
return []
if len(__A ) == 1:
return list(__A )
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = len(__A ) - 1
_lowerCamelCase : str = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid]
high -= 1
else:
_lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(__A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 1 |
import math
import os
import sys
def A__(file_path: str) -> str:
    """Return the content of ``file_path`` as a string of '0'/'1' bits.

    Exits the process on OSError (script-style error handling, kept from
    upstream). Fix for obfuscation damage: ``result`` was never assigned.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


# Real name used by compress() below.
read_file_binary = A__
def A__(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Grow the LZ lexicon: replace ``curr_string`` by its two 1-bit extensions.

    When ``index`` reaches a power of two, every existing code needs one more
    bit, so all codes are left-padded with '0'. Fix for obfuscation damage:
    duplicate ``__A`` parameters, discarded dict writes, and ``math.loga``
    (mangled ``math.log2``).
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # code length grows by one bit: left-pad every existing code
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


# Real name used by compress_data() below.
add_key_to_lexicon = A__
def A__(data_bits: str) -> str:
    """LZ78-style compression of a bit string using a growing lexicon.

    Emits the code of each longest known prefix and extends the lexicon via
    ``add_key_to_lexicon``. A trailing unmatched fragment is zero-padded
    until it matches. Fix for obfuscation damage: ``lexicon``, ``result``,
    ``curr_string`` and ``index`` were never assigned.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # flush any unmatched tail by padding with zeros until it is a known code
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


# Real name used by compress() below.
compress_data = A__
def A__(source_path: str, compressed: str) -> str:
    """Prefix ``compressed`` with a self-delimiting header of the source size.

    The header is the binary file length preceded by (len-1) zero bits, so a
    reader can recover the length before the payload. Fix for obfuscation
    damage: duplicate parameters and discarded locals.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


# Real name used by compress() below.
add_file_length = A__
def A__(file_path: str, to_write: str) -> None:
    """Pack a bit string into bytes and write them to ``file_path``.

    The final byte is padded with a marker '1' followed by zeros (a whole
    padding byte ``10000000`` is appended when the data is byte-aligned).
    Exits the process on OSError, as upstream. Fix for obfuscation damage:
    duplicate parameters and the never-assigned ``result_byte_array``.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


# Real name used by compress() below.
write_file_binary = A__
def A__(source_path: str, destination_path: str) -> None:
    """End-to-end compression: read bits, compress, add header, write output.

    Fix for obfuscation damage: duplicate parameters and discarded
    intermediate results.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


# Real name used by the __main__ guard below.
compress = A__
# CLI entry point: python lempel_ziv.py <input_path> <output_path>
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 15 | from __future__ import annotations
lowerCAmelCase : int =[]
# Real name referenced by solve(); both bindings share the same list object.
solution = lowerCAmelCase
def A__(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen at (row, column) is not attacked.

    Checks the row, the column, and both upward diagonals (rows above have
    already been filled by solve()). Fix for obfuscation damage: the three
    ``__A`` parameters collided and ``row``/``column`` were undefined.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


# Real name used by solve() below.
is_safe = A__
def A__(board: list[list[int]], row: int) -> bool:
    """Place queens row by row via backtracking, recording full placements.

    Appends each complete board to the module-level ``solution`` list and
    prints it. Fix for obfuscation damage: duplicate parameters and the
    discarded 1/0 writes into the board.
    """
    if row >= len(board):
        # all rows filled: record and display this placement
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False


# Real name used for the recursive call and by the driver below.
solve = A__
def A__(board: list[list[int]]) -> None:
    """Print the board: 'Q' for a queen, '.' for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# Real name used by solve() above.
printboard = A__
# n=int(input("The no. of queens"))
# Demo driver: solve the classic 8-queens problem and report the count.
# Fix for obfuscation damage: ``n`` and ``board`` were never assigned.
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | 1 |
from ..utils import DummyObject, requires_backends
# NOTE(review): the six classes below all carry the obfuscated name
# `__snake_case`, so each definition shadows the previous one; upstream they
# are distinct ONNX pipeline dummies. Fixed here: `*_UpperCamelCase` and
# `**_UpperCamelCase` shared a name (a SyntaxError), and the two classmethods
# per class shadowed each other — restored to from_config/from_pretrained.
class __snake_case(metaclass=__lowerCAmelCase):
    """Placeholder that raises a helpful error when torch/transformers/onnx are missing."""

    _snake_case = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __snake_case(metaclass=__lowerCAmelCase):
    """Placeholder requiring the torch/transformers/onnx backends."""

    _snake_case = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __snake_case(metaclass=__lowerCAmelCase):
    """Placeholder requiring the torch/transformers/onnx backends."""

    _snake_case = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __snake_case(metaclass=__lowerCAmelCase):
    """Placeholder requiring the torch/transformers/onnx backends."""

    _snake_case = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __snake_case(metaclass=__lowerCAmelCase):
    """Placeholder requiring the torch/transformers/onnx backends."""

    _snake_case = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __snake_case(metaclass=__lowerCAmelCase):
    """Placeholder requiring the torch/transformers/onnx backends."""

    _snake_case = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 15 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of labels for each GLUE task (the regression task sts-b uses 1).
lowerCAmelCase : int ={
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
# Real name referenced by the converter below; same dict object.
GLUE_TASKS_NUM_LABELS = lowerCAmelCase
logging.set_verbosity_info()
def A__(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model on disk.

    Picks the task head from the finetuning task (GLUE classification, SQuAD
    QA, or plain LM head), loads the TF weights, then writes the state dict
    and config into ``pytorch_dump_folder_path``. Fix for obfuscation damage:
    duplicate parameters and discarded ``config``/``model`` assignments.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


# Real name used by the __main__ guard below.
convert_xlnet_checkpoint_to_pytorch = A__
if __name__ == "__main__":
    # Fix for obfuscation damage: ``parser`` and ``args`` were never assigned.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 15 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def A__(model):
    """Derive HF encoder/decoder configs from an original DonutModel.

    Returns (DonutSwinConfig, MBartConfig) mirroring the original model's
    hyper-parameters. Fix for obfuscation damage: discarded assignments and
    boolean kwargs replaced by the duplicated ``__A`` parameter.
    """
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config


# Real name used by convert_donut_checkpoint() below.
get_configs = A__
def A__(name):
    """Map an original Donut/Swin parameter name onto the HF naming scheme.

    Fix for obfuscation damage: every ``name.replace(...)`` result was
    discarded into ``_lowerCamelCase`` instead of rebinding ``name``.
    """
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name


# Real name used by convert_state_dict() below.
rename_key = A__
def A__(orig_state_dict, model):
    """Rewrite an original Donut state dict into the HF VisionEncoderDecoder layout.

    Fused qkv projections are split into separate query/key/value tensors of
    size ``dim`` (taken from the target model); attention-mask buffers and the
    encoder's final LayerNorm are dropped; all other keys go through
    ``rename_key``. Fix for obfuscation damage: the split slices were computed
    and then discarded instead of being written back under the renamed keys.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


# Real name used by convert_donut_checkpoint() below.
convert_state_dict = A__
def A__(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original Donut checkpoint to a HF VisionEncoderDecoderModel.

    Loads the original model, builds the HF equivalent, transfers weights,
    verifies patch embeddings / encoder states / logits on a sample document,
    then optionally saves and pushes to the hub. Fix for obfuscation damage:
    duplicate parameters and every intermediate assignment discarded.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1E-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1E-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1E-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


# Real name used by the __main__ guard below.
convert_donut_checkpoint = A__
if __name__ == "__main__":
    # Fix for obfuscation damage: ``parser`` and ``args`` were never assigned.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )
    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
def A__(input_str: str) -> bool:
    """Return True iff every character in ``input_str`` is distinct.

    Tracks seen code points in an integer bitmap; a set bit on a repeated
    character means a duplicate. Fix for obfuscation damage: ``bitmap`` and
    the bit helpers were never assigned.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 15 | 1 |
def A__(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence (2, 3, 7, 43, ...).

    Recurrence: a(1) = 2, a(n) = a(n-1)^2 - a(n-1) + 1, computed here as
    (a(n-1) - 1) * a(n-1) + 1. Raises ValueError for n < 1. Fix for
    obfuscation damage: ``num``/``lower``/``upper`` were never assigned.
    """
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


# Real name used for the recursive call and by the demo below.
sylvester = A__
# Demo: print the 8th Sylvester number when run as a script.
if __name__ == "__main__":
    print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 15 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case(unittest.TestCase):
    """Slow integration tests for XLM-RoBERTa base and large checkpoints.

    Fix for obfuscation damage: both test methods shared one mangled name (the
    second silently shadowed the first), and every local (model, input_ids,
    expected values) was discarded. Real `test_*` names restored so unittest
    discovery works.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 15 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_lengths=True,
    use_token_type_ids=True,
    use_labels=True,
    gelu_activation=True,
    sinusoidal_embeddings=False,
    causal=False,
    asm=False,
    n_langs=2,
    vocab_size=99,
    n_special=0,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=12,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    num_choices=4,
    summary_type="last",
    use_proj=None,
    scope=None,
):
    """Store the tester's hyper-parameters for building tiny Flaubert models.

    Fix for obfuscation damage: the header declared ``_UpperCamelCase`` for
    every parameter (a SyntaxError) and the attribute writes were discarded —
    parameter names and defaults restored from the assignment RHS order.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_lengths = use_input_lengths
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.gelu_activation = gelu_activation
    self.sinusoidal_embeddings = sinusoidal_embeddings
    self.causal = causal
    self.asm = asm
    self.n_langs = n_langs
    self.vocab_size = vocab_size
    self.n_special = n_special
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.summary_type = summary_type
    self.use_proj = use_proj
    self.scope = scope
def _SCREAMING_SNAKE_CASE(self):
    """Build a config plus a full set of random inputs, masks and labels.

    Fix for obfuscation damage: every tensor was discarded into
    ``_lowerCamelCase`` while the return tuple expects the real names.
    """
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = random_attention_mask([self.batch_size, self.seq_length])
    input_lengths = None
    if self.use_input_lengths:
        input_lengths = (
            ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
        )  # small variation of seq_length
    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
    sequence_labels = None
    token_labels = None
    is_impossible_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        is_impossible_labels = ids_tensor([self.batch_size], 2).float()
        # NOTE(review): as upstream, choice_labels is unbound when
        # use_labels is False — the tester always runs with use_labels=True.
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
    config = self.get_config()
    return (
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    )


# Real name used by the ModelTester API.
prepare_config_and_inputs = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(self):
    """Build a small FlaubertConfig from the tester's hyper-parameters."""
    return FlaubertConfig(
        vocab_size=self.vocab_size,
        n_special=self.n_special,
        emb_dim=self.hidden_size,
        n_layers=self.num_hidden_layers,
        n_heads=self.num_attention_heads,
        dropout=self.hidden_dropout_prob,
        attention_dropout=self.attention_probs_dropout_prob,
        gelu_activation=self.gelu_activation,
        sinusoidal_embeddings=self.sinusoidal_embeddings,
        asm=self.asm,
        causal=self.causal,
        n_langs=self.n_langs,
        max_position_embeddings=self.max_position_embeddings,
        initializer_range=self.initializer_range,
        summary_type=self.summary_type,
        use_proj=self.use_proj,
    )


# Real name: prepare_config_and_inputs() calls self.get_config().
get_config = _SCREAMING_SNAKE_CASE
def create_and_check_flaubert_model(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Run the bare FlaubertModel forward (with/without lengths and langs)
    and check the hidden-state shape.

    NOTE(review): the mangled signature declared every parameter as the
    same name (a SyntaxError); parameter names restored from the call
    sites and the upstream test file.
    """
    model = FlaubertModel(config=config)
    model.to(torch_device)  # torch_device is imported from transformers.testing_utils at file top
    model.eval()
    result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
    result = model(input_ids, langs=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_flaubert_lm_head(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Check the LM head: loss is a scalar, logits are (batch, seq, vocab)."""
    model = FlaubertWithLMHeadModel(config)
    model.to(torch_device)  # torch_device from transformers.testing_utils (file top)
    model.eval()
    result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.loss.shape, ())
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_flaubert_simple_qa(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Check the simple (SQuAD-style) QA head: start/end logits are (batch, seq)."""
    model = FlaubertForQuestionAnsweringSimple(config)
    model.to(torch_device)  # torch_device from transformers.testing_utils (file top)
    model.eval()
    result = model(input_ids)
    result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_flaubert_qa(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Check the beam-search QA head with and without the full label set.

    Keyword-argument mapping (cls_index / is_impossible / p_mask) restored
    from the upstream FlauBERT test; parameter names were mangled into
    duplicates (a SyntaxError) in the reviewed version.
    """
    model = FlaubertForQuestionAnswering(config)
    model.to(torch_device)  # torch_device from transformers.testing_utils (file top)
    model.eval()
    result = model(input_ids)
    result_with_labels = model(
        input_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
        cls_index=sequence_labels,
        is_impossible=is_impossible_labels,
        p_mask=input_mask,
    )
    result_with_labels = model(
        input_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
        cls_index=sequence_labels,
        is_impossible=is_impossible_labels,
    )
    (total_loss,) = result_with_labels.to_tuple()
    result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
    (total_loss,) = result_with_labels.to_tuple()
    self.parent.assertEqual(result_with_labels.loss.shape, ())
    self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
    self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
    self.parent.assertEqual(
        result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
    )
    self.parent.assertEqual(
        result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
    )
    self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def create_and_check_flaubert_sequence_classif(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Check the sequence-classification head: scalar loss, (batch, n_labels) logits."""
    model = FlaubertForSequenceClassification(config)
    model.to(torch_device)  # torch_device from transformers.testing_utils (file top)
    model.eval()
    result = model(input_ids)
    result = model(input_ids, labels=sequence_labels)
    self.parent.assertEqual(result.loss.shape, ())
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def create_and_check_flaubert_token_classif(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Check the token-classification head: logits are (batch, seq, num_labels)."""
    config.num_labels = self.num_labels
    model = FlaubertForTokenClassification(config)
    model.to(torch_device)  # torch_device from transformers.testing_utils (file top)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_flaubert_multiple_choice(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Check the multiple-choice head: inputs are tiled to
    (batch, num_choices, seq) and logits come back as (batch, num_choices)."""
    config.num_choices = self.num_choices
    model = FlaubertForMultipleChoice(config=config)
    model.to(torch_device)  # torch_device from transformers.testing_utils (file top)
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Adapter for the common ModelTesterMixin tests.

    Returns (config, inputs_dict), where inputs_dict holds the keyword
    arguments accepted by every Flaubert model class.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ) = config_and_inputs
    inputs_dict = {
        "input_ids": input_ids,
        "token_type_ids": token_type_ids,
        "lengths": input_lengths,
        "attention_mask": input_mask,
    }
    return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Common + pipeline test-suite for the FlauBERT model family.

    NOTE(review): the class and base names were mangled by an identifier
    pass; the bases are the file's ModelTesterMixin / PipelineTesterMixin
    style imports (their import lines are outside this chunk, so the
    mangled names are left untouched).  Method and attribute names are
    restored: the common/pipeline mixins read ``all_model_classes`` /
    ``pipeline_model_mapping`` by name, and unittest only discovers methods
    whose names start with ``test_`` — the previous ``_SCREAMING_SNAKE_CASE``
    names meant none of these tests ever ran.
    """

    # Model classes exercised by the common tests (read by the tester mixin).
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task -> model class mapping (read by the pipeline tester mixin).
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip the QA pipeline tests when a slow (non-Fast) tokenizer is used."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin's input prep with dummy QA position labels.

        NOTE(review): the mangled version assigned the two tensors to throwaway
        locals; restored to dict entries per the upstream test (confirm keys).
        """
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        """Create the shared model tester and config tester fixtures."""
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading the first published checkpoint."""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace on CPU, save, then reload the traced model on the GPU device."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class __snake_case ( unittest.TestCase ):
    """Slow integration test: run the pretrained base FlauBERT checkpoint on a
    fixed input and compare against reference activations.

    NOTE(review): the test method was mangled to ``_SCREAMING_SNAKE_CASE``,
    which unittest never discovers; restored to a ``test_*`` name.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference slice recorded from the original implementation.
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
    r"""Image processor: optional resize (bicubic, default 256x256),
    center-crop (default 224x224), rescale (default 1/255) and normalization
    to ImageNet-standard mean/std.

    NOTE(review): class/base identifiers were mangled; the base is the
    file's ``BaseImageProcessor`` import.  Parameter and method names are
    restored from the bodies (the mangled signatures repeated one parameter
    name, which is a SyntaxError in Python).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (dict with 'height' and 'width')."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size`` (dict with 'height' and 'width')."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:  # fix: return annotation was ``str``
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch: resize -> center-crop -> rescale
        -> normalize, then pack into a BatchFeature.

        Each ``do_*``/value argument defaults to the instance setting when
        None is passed.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # fix: was `do_resize and size is None or resample is None`, which
        # (by precedence) raised whenever resample was None even with
        # do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
# --- (corrupted dataset-row separator) ---
import math

# 70 coloured balls: 10 balls in each of 7 colours.
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Project Euler 493: expected number of distinct colours when
    ``num_picked`` balls are drawn at random from the 70-ball urn.

    By linearity of expectation, the answer is
    NUM_COLOURS * P(a given colour appears) where
    P(missing) = C(60, k) / C(70, k).

    Returns the result formatted to nine decimal places.

    Fixes: the constants were assigned to a throwaway name while the code
    read BALLS_PER_COLOUR/NUM_COLOURS/NUM_BALLS (NameError), and ``total``
    was computed as comb(k, k) == 1 instead of comb(NUM_BALLS, k).
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"""{result:.9f}"""


if __name__ == "__main__":
    print(solution(20))
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( Protocol ):
    """Structural interface for audio filters used by the response plotters.

    NOTE(review): the base name was mangled; ``Protocol`` is imported above
    and otherwise unused, so it is restored here.  The method must be named
    ``process`` — the plot helpers call ``filter_type.process(item)``.
    """

    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]; the stub emits silence."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return (lowest, highest) display bounds in dB for a magnitude FFT.

    Only bins 1 .. samplerate//2 - 2 (positive frequencies, DC and Nyquist
    excluded) are considered; the bounds are widened to at least -20/+20 dB.

    NOTE(review): renamed from the mangled ``A__`` — the frequency-response
    plotter calls ``get_bounds(fft_db, samplerate)``.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type, samplerate: int) -> None:
    """Plot the magnitude (dB) frequency response of ``filter_type``.

    Feeds a unit impulse through the filter, zero-pads to ``samplerate``
    samples, FFTs, and plots gain on a log-frequency axis.

    Fixes: the mangled signature duplicated one parameter name (a
    SyntaxError); the impulse-response comprehension processed the wrong
    variable instead of each input sample; ``np.logaa`` does not exist —
    the dB conversion uses ``np.log10``.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type, samplerate: int) -> None:
    """Plot the (unwrapped) phase response of ``filter_type`` in radians.

    Fixes: the mangled signature duplicated one parameter name (a
    SyntaxError) and the comprehension processed the wrong variable instead
    of each input sample.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
# --- (corrupted dataset-row separator) ---
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    """Recursively collect the ``.shape`` of every tensor leaf in a nested
    dict / list / tuple / tensor structure.

    Raises ValueError for any other leaf type.

    Fixes: the function name is restored (the body calls ``_fetch_dims``
    recursively) and ``isinstance(tree, tree)`` — a guaranteed TypeError —
    is corrected to ``isinstance(tree, dict)``.
    """
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims):
    """Convert a flat (row-major) index into a multi-dimensional index for
    the shape ``dims``.

    Fixes: the mangled signature declared both parameters with the same
    name, which is a SyntaxError; the name is restored per its call site.
    """
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start,
    end,
    dims,
    start_edges=None,
    end_edges=None,
):
    """Return a minimal list of slice tuples that together cover exactly the
    (inclusive) flat index range between multi-index ``start`` and multi-index
    ``end`` of a tensor whose leading dimensions are ``dims``.

    Fixes: the mangled signature collapsed all five parameters into one name
    (a SyntaxError) and several inner references (``l``, ``tally``, ``sdi``,
    ``edi``) were broken; restored from the surviving body references.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper():
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower():
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start: int, flat_end: int, no_batch_dims: int):
    """Extract the flat range [flat_start, flat_end) over the first
    ``no_batch_dims`` dims of ``t`` and return it with those dims flattened.

    Fixes: the mangled signature declared all four parameters with the same
    name (a SyntaxError); names restored from the keyword call site
    (``partial(_chunk_slice, flat_start=..., flat_end=..., no_batch_dims=...)``).
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer,
    inputs,
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out=None,
    _add_into_out: bool = False,
):
    """Apply ``layer`` to ``inputs`` in chunks along the leading batch dims
    to bound peak memory, reassembling the full output at the end.

    Args:
        layer: callable invoked as ``layer(**chunk)`` on each input chunk.
        inputs: (nested) dict of tensors; the first ``no_batch_dims`` dims of
            each tensor are treated as (broadcastable) batch dimensions.
        chunk_size: number of flattened batch elements per chunk.
        no_batch_dims: how many leading dims count as batch dimensions.
        low_mem: if True, avoid materializing fully expanded inputs (slower,
            uses ``_chunk_slice`` instead of plain slicing).
        _out: optional pre-allocated output tree to write into.
        _add_into_out: accumulate into ``_out`` instead of overwriting.

    NOTE(review): the original signature declared every parameter with the
    same mangled name (a SyntaxError) and the inner lambdas' parameter names
    did not match their bodies; names restored from the surviving body
    references and the openfold implementation this derives from.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t):
        # Broadcast every input up to the common batch shape and (unless in
        # low-memory mode) flatten the batch dims into one.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t):
        # Tensors with a broadcast (size-1) batch dim are shared, not sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class __snake_case :
    """Binary-searches for the largest chunk size (power of two up to
    ``max_chunk_size``) that runs without a RuntimeError (e.g. CUDA OOM),
    caching the result until the argument shapes change.

    NOTE(review): method/parameter names restored — the mangled version
    repeated one parameter name per signature (a SyntaxError), unpacked an
    int with ``fn(*chunk_size, ...)``, and used ``key=lambda _: x[0]`` where
    ``x`` was undefined.
    """

    def __init__(self, max_chunk_size: int = 512) -> None:
        self.max_chunk_size = max_chunk_size
        # Cached tuning result and the argument "shape signature" it was
        # computed for.
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size: int) -> int:
        """Return the largest viable chunk size for ``fn(*args, chunk_size=...)``."""
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        """Deep-compare two argument caches (lists/tuples/dicts/leaves)."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                # Compare dict values in key order so ordering can't matter.
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size: int) -> int:
        """Return a chunk size for ``representative_fn``, re-tuning only when
        the argument shapes/values differ from the cached run."""
        consistent = True
        # Replace tensors by their shapes so the cache keys on geometry only.
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 15 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A__ ( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    '''Export *model* to ONNX at *output_path* via ``torch.onnx.export``.

    Parameter names match the keyword arguments used at the call site below
    (the obfuscated original repeated one parameter name, a SyntaxError).

    Args:
        model: the torch module (or callable) to export.
        model_args: tuple of example inputs passed to the traced forward.
        output_path: `pathlib.Path` of the .onnx file to write.
        ordered_input_names / output_names: ONNX graph input/output names.
        dynamic_axes: mapping of input name -> {axis index: symbolic name}.
        opset: ONNX operator-set version.
        use_external_data_format: pre-1.11 flag for >2GB models.
    '''
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def A__ ( model_path , output_path , opset , fp16 = False ):
    '''Export the VAE decoder of a `diffusers` checkpoint to ONNX.

    Args:
        model_path: local directory or Hub id of the diffusers checkpoint.
        output_path: destination directory for `vae_decoder/model.onnx`.
        opset: ONNX operator-set version.
        fp16: export in float16 (CUDA required).

    Raises:
        ValueError: if ``fp16`` is requested without an available CUDA device.
    '''
    # Fix for obfuscated original: both branches read `torch.floataa` (an
    # attribute that does not exist); the intent is float16 vs float32.
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    # NOTE(review): `onnx_export` is the intended name of the export helper
    # defined above (there it is obfuscated to `A__` as well) — TODO confirm.
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    # Fixes for the obfuscated original: `parser`/`args` were never bound
    # (assigned to `lowerCAmelCase`), `--fp16` populates `args.fp16` (not
    # `args.fpaa`), and the conversion entry point in this file is the
    # last-defined function named `A__` (upstream: `convert_models`).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    A__(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 15 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase : int =logging.get_logger(__name__)
class __snake_case :
    """Holds the state of a conversation: past user inputs, generated responses,
    and the not-yet-processed new user input, keyed by a UUID.

    NOTE(review): the obfuscated original repeated parameter names (a
    SyntaxError), called the non-existent ``uuid.uuida()``, and collapsed all
    method names; the names below are restored to match the internal call sites
    (``self.iter_texts`` in ``__repr__``, and ``mark_processed`` /
    ``append_response`` used by the pipeline class in this module).
    """

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None) -> None:
        if not conversation_id:
            # Fresh conversations get a random UUID (original called `uuid.uuida`).
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other) -> bool:
        # `self.__class__` avoids name-mangling issues with the dunder class name.
        if not isinstance(other, self.__class__):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False) -> None:
        """Queue *text* as the next user input; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    F"""with: \"{text}\".""")
                self.new_user_input = text
            else:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""")
        else:
            self.new_user_input = text

    def mark_processed(self) -> None:
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str) -> None:
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs over the whole conversation, in order."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self) -> str:
        output = F"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += F"""{name} >> {text} \n"""
        return output
@add_end_docstrings(
    __lowerCAmelCase , r'\n    min_length_for_response (`int`, *optional*, defaults to 32):\n        The minimum length (in number of tokens) for a response.\n    minimum_tokens (`int`, *optional*, defaults to 10):\n        The minimum length of tokens to leave for a response.\n    ' , )
class __snake_case ( __lowerCAmelCase ):
    """Multi-turn conversational pipeline.

    NOTE(review): the obfuscated original repeated parameter names (a
    SyntaxError) and collapsed every method into `_SCREAMING_SNAKE_CASE`; the
    pipeline framework dispatches to `_sanitize_parameters` / `preprocess` /
    `_forward` / `postprocess`, so those names are restored here.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            # Models without a pad token fall back to EOS for padding.
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        """Split call-time kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run one generation round; unwrap single-element result lists."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        """Tokenize the conversation into framework-specific input ids."""
        # NOTE(review): both classes in this module are named `__snake_case`, so
        # the Conversation class cannot be referenced by name here (it is
        # shadowed, and dunder names are mangled inside class bodies);
        # duck-type on the attribute the rest of this method relies on.
        if not hasattr(conversation, "new_user_input"):
            raise ValueError("""ConversationalPipeline, expects Conversation as inputs""")
        if conversation.new_user_input is None:
            raise ValueError(
                F"""Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. """
                """Add user inputs with the conversation's `add_user_input` method""")
        if hasattr(self.tokenizer, """_build_conversation_input_ids"""):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate, after trimming the prompt so `minimum_tokens` remain for output."""
        max_length = generate_kwargs.get("""max_length""" , self.model.config.max_length)
        n = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["""attention_mask"""][:, -trim:]
        conversation = model_inputs.pop("""conversation""")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start after the decoder start token.
            start_position = 1
        else:
            # Decoder-only outputs echo the prompt; skip its n tokens.
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the generated ids and fold the answer back into the conversation."""
        output_ids = model_outputs["""output_ids"""]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Fallback tokenization: join turns with EOS, truncate to model_max_length."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 15 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( donor_conc , acceptor_conc , intrinsic_conc , T=300 , ):
    """Return the built-in voltage (in volts) of a p-n junction:

        V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2)

    The obfuscated original repeated one parameter name (a SyntaxError) while
    the body read `donor_conc` etc.; names are restored, and the temperature is
    generalized into a keyword parameter (default 300 K, backward compatible).

    Args:
        donor_conc: donor concentration N_d; must be positive.
        acceptor_conc: acceptor concentration N_a; must be positive.
        intrinsic_conc: intrinsic carrier concentration n_i; must be positive.
        T: absolute temperature in kelvin (default 300).

    Raises:
        ValueError: if any concentration is non-positive, or if a doping
            concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("""Donor concentration should be positive""")
    elif acceptor_conc <= 0:
        raise ValueError("""Acceptor concentration should be positive""")
    elif intrinsic_conc <= 0:
        raise ValueError("""Intrinsic concentration should be positive""")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            """Donor concentration should be greater than intrinsic concentration""")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            """Acceptor concentration should be greater than intrinsic concentration""")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 15 | 1 |
import numpy as np
import datasets
# The metric class below (and its decorator) read `_DESCRIPTION`, `_CITATION`
# and `_KWARGS_DESCRIPTION`; the obfuscated original clobbered all three into
# one `lowerCAmelCase` binding — restore the expected names.
_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    """Metric computing the (squared-form) Mahalanobis distance of each row of
    `X` from a reference distribution.

    NOTE(review): the obfuscated original repeated parameter names (a
    SyntaxError) and collapsed both methods into one name; `datasets.Metric`
    dispatches to `_info` / `_compute`, so those names are restored.
    """

    def _info(self):
        """Declare the metric metadata and the input feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""") , id="""X"""),
                }) , )

    def _compute(self, X, reference_distribution):
        """Return {"mahalanobis": distances} for each row of `X`."""
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("""Expected `X` to be a 2D vector""")
        if len(reference_distribution.shape) != 2:
            raise ValueError("""Expected `reference_distribution` to be a 2D vector""")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""")
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 15 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
    '''Tokenize one dataset example and record its chars-per-token ratio.

    The obfuscated original never bound `output` (it returned an undefined
    name), clobbered the result dict, and passed the example itself as
    `truncation`; restored per the upstream pretokenization script.

    Args:
        __A: a dataset example dict with a "content" string field.

    Returns:
        dict with "input_ids" and "ratio_char_token" fields.
    '''
    output = {}
    # `tokenizer` is a module-level global created in the script body below.
    output["input_ids"] = tokenizer(__A["""content"""], truncation=False)["""input_ids"""]
    output["ratio_char_token"] = len(__A["""content"""]) / len(output["""input_ids"""])
    return output
# Fixes for the obfuscated original: `args`, `tokenizer`, `t_start` and `ds`
# were all assigned to `lowerCAmelCase` and then read under their real names,
# and `ds.map(tokenize, ...)` referenced an undefined name (the tokenize
# function above is named `A__` in this file).
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds = ds.map(
    A__,  # the per-example tokenization function defined above
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 1 |
import argparse
import os
import re
# The sorting functions below read `_re_intro_mapping` and `_re_identifier`;
# the obfuscated original clobbered the path and both regexes into one
# `lowerCAmelCase` binding — restore the expected names.
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def A__ ( fname , overwrite = False ):
    '''Sort the entries of every `*_MAPPING(_NAMES) = OrderedDict` literal in *fname*.

    The obfuscated original repeated one parameter name (a SyntaxError). The two
    regexes are compiled locally so the function does not depend on module
    globals that were lost to the obfuscated constant bindings.

    Args:
        fname: path of the Python module to process.
        overwrite: when True, write the sorted content back to the file.

    Returns:
        True when ``overwrite`` is False and the file needed sorting;
        None otherwise.
    '''
    _re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
    _re_identifier = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")

    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Mapping entries sit 8 columns past the mapping's own indent.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def A__ ( overwrite = False , auto_module_path = "src/transformers/models/auto" ):
    '''Apply the per-file mapping sorter to every `.py` module under *auto_module_path*.

    Fixes for the obfuscated original: `os.listdir` / `os.path.join` were called
    on the `overwrite` flag, and the comprehension passed the wrong variable to
    the sorter. The path is parameterized (default matches the original
    constant), which is backward compatible.

    Raises:
        ValueError: in check-only mode, listing every file that needs sorting.
    '''
    fnames = [os.path.join(auto_module_path, f) for f in os.listdir(auto_module_path) if f.endswith(".py")]
    # NOTE(review): `sort_auto_mapping` is the intended name of the per-file
    # sorter above; in this file that function is (obfuscatedly) also named
    # `A__` and is shadowed by this definition, so the call below is unresolved
    # as written upstream — TODO confirm the intended binding.
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"""
            """ this.""")
if __name__ == "__main__":
    # Fixes for the obfuscated original: `parser`/`args` were never bound
    # (assigned to `lowerCAmelCase`) and `sort_all_auto_mappings` is undefined;
    # the last-defined `A__` above is the all-files sorter.
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    A__(not args.check_only)
| 15 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    '''Fast pipeline tests for `IFPipeline` using dummy components.

    NOTE(review): obfuscation assigned all four class attributes to the same
    name `_snake_case` (later assignments shadow earlier ones; upstream these
    are pipeline_class / params / batch_params / required_optional_params) and
    collapsed every test method into `_SCREAMING_SNAKE_CASE`; one signature
    also repeats the parameter name `_UpperCamelCase`, which is a SyntaxError.
    '''
    _snake_case = IFPipeline
    _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
    _snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
        """Build the dummy model components supplied by the mixin."""
        return self._get_dummy_components()
    # NOTE(review): duplicate parameter name below (SyntaxError in the
    # obfuscated source); upstream this is get_dummy_inputs(device, seed=0).
    def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
        """Return deterministic pipeline inputs for the given device/seed."""
        if str(_UpperCamelCase).startswith("""mps"""):
            # MPS generators must be seeded globally.
            _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
        else:
            _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
        _lowerCamelCase : Dict = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
        """Check save/load round-trip with optional components."""
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """Check save/load round-trip in float16 on CUDA."""
        super().test_save_load_floataa(expected_max_diff=1E-1)
    def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
        """Check attention slicing yields near-identical outputs."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
        """Check local save/load round-trip."""
        self._test_save_load_local()
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
        """Check batched vs single inference consistency."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
        """Check xFormers attention yields near-identical outputs."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    '''Slow GPU integration tests chaining the DeepFloyd IF stage-I and stage-II
    pipelines for text-to-image, img2img and inpainting; each helper checks
    output shapes, peak CUDA memory and a reference image.

    NOTE(review): obfuscation collapsed method names into
    `_SCREAMING_SNAKE_CASE` (later defs shadow earlier ones), repeats parameter
    names in several signatures (a SyntaxError), and the driver calls
    `self._test_if*` helpers that no longer exist under those names.
    '''
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        """Free Python and CUDA memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
        """Drive all three IF workflows end to end with shared text embeddings."""
        _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
        _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
        # pre compute text embeddings and remove T5 to save memory
        pipe_a.text_encoder.to("""cuda""")
        _lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
        del pipe_a.tokenizer
        del pipe_a.text_encoder
        gc.collect()
        _lowerCamelCase : str = None
        _lowerCamelCase : str = None
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # img2img
        _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
        _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # inpainting
        _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
        _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
    # NOTE(review): the three helpers below repeat parameter names (SyntaxError
    # in the obfuscated source); upstream they take (pipe_1, pipe_2,
    # prompt_embeds, negative_prompt_embeds).
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
        """Text-to-image: stage I then stage-II super-resolution, with checks."""
        _start_torch_memory_measurement()
        _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCamelCase : Optional[Any] = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
        _lowerCamelCase : Optional[int] = output.images[0]
        assert image.shape == (64, 64, 3)
        _lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        _lowerCamelCase : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
        # pipeline 2
        _start_torch_memory_measurement()
        _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : str = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
        _lowerCamelCase : Any = output.images[0]
        assert image.shape == (256, 256, 3)
        _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _lowerCamelCase : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
        """Img2img: stage I then stage-II super-resolution, with checks."""
        _start_torch_memory_measurement()
        _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCamelCase : Dict = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
        _lowerCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (64, 64, 3)
        _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        _lowerCamelCase : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
        # pipeline 2
        _start_torch_memory_measurement()
        _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
        _lowerCamelCase : List[Any] = output.images[0]
        assert image.shape == (256, 256, 3)
        _lowerCamelCase : str = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _lowerCamelCase : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
        """Inpainting: stage I then stage-II super-resolution, with checks."""
        _start_torch_memory_measurement()
        _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
        _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCamelCase : Any = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
        _lowerCamelCase : Any = output.images[0]
        assert image.shape == (64, 64, 3)
        _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        _lowerCamelCase : str = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
        # pipeline 2
        _start_torch_memory_measurement()
        _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
        _lowerCamelCase : List[str] = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
        _lowerCamelCase : Optional[Any] = output.images[0]
        assert image.shape == (256, 256, 3)
        _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _lowerCamelCase : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def A__ ( ):
    '''Reset CUDA memory bookkeeping so a fresh peak-usage measurement can start.'''
    # Apply each reset hook in order: drop cached allocator blocks first, then
    # clear both the legacy and current peak-usage counters.
    for reset_hook in (
        torch.cuda.empty_cache,
        torch.cuda.reset_max_memory_allocated,
        torch.cuda.reset_peak_memory_stats,
    ):
        reset_hook()
| 15 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __snake_case ( unittest.TestCase ):
    """Pipeline tests for video classification.

    NOTE(review): obfuscation damage throughout this class — every method is
    named `_SCREAMING_SNAKE_CASE` (later defs shadow earlier ones), several
    defs repeat the parameter name `_UpperCamelCase` (a SyntaxError), and the
    bodies still reference pre-rename locals (`example_video_filepath`,
    `video_classifier`, `examples`, `video_file_path`). Restore distinct
    names before relying on this file.
    """
    _snake_case = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    # NOTE(review): duplicate `_UpperCamelCase` parameters below — SyntaxError.
    def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[str]) ->str:
        """Build a pipeline plus two example inputs (local file and URL)."""
        _lowerCamelCase : Any = hf_hub_download(
            repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""")
        _lowerCamelCase : List[Any] = VideoClassificationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase , top_k=2)
        _lowerCamelCase : Tuple = [
            example_video_filepath,
            """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
        ]
        return video_classifier, examples
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any]) ->Any:
        """Run the classifier on each example; expect a top-2 list of score/label dicts."""
        for example in examples:
            _lowerCamelCase : Any = video_classifier(_UpperCamelCase)
            self.assertEqual(
                _UpperCamelCase , [
                    {"""score""": ANY(_UpperCamelCase), """label""": ANY(_UpperCamelCase)},
                    {"""score""": ANY(_UpperCamelCase), """label""": ANY(_UpperCamelCase)},
                ] , )
    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """Small-model smoke test: single and batched video inputs, top_k=2."""
        _lowerCamelCase : Tuple = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
        # 10x10 frames keep the tiny model fast.
        _lowerCamelCase : Any = VideoMAEFeatureExtractor(
            size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10})
        _lowerCamelCase : str = pipeline(
            """video-classification""" , model=_UpperCamelCase , feature_extractor=_UpperCamelCase , frame_sampling_rate=4)
        _lowerCamelCase : Dict = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""")
        _lowerCamelCase : str = video_classifier(_UpperCamelCase , top_k=2)
        self.assertEqual(
            nested_simplify(_UpperCamelCase , decimals=4) , [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}] , )
        _lowerCamelCase : List[Any] = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(_UpperCamelCase , decimals=4) , [
                [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
                [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
            ] , )
    @require_tf
    def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
        """TF counterpart not implemented; placeholder keeps the suite green."""
        pass
| 15 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for the Swin configuration module.
lowerCAmelCase : Any =logging.get_logger(__name__)
# NOTE(review): this second assignment rebinds the same name and clobbers the
# logger above; upstream used distinct names (`logger` and
# `SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP`) — confirm before relying on either.
lowerCAmelCase : List[Any] ={
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
    """Configuration for a Swin Transformer model/backbone.

    Stores the architecture hyper-parameters (patching, embedding width,
    per-stage depths/heads, attention window, dropout rates) and derives
    `hidden_size` plus the stage bookkeeping used by the backbone mixin.

    NOTE(review): in the obfuscated original, both class attributes were bound
    to the same name (`_snake_case`, second clobbering the first) and every
    `self.<attr> = ...` assignment targeted a throwaway local, so no attribute
    was ever set. Restored to the names the body and callers reference
    (e.g. `self.stage_names` is read on the last line of `__init__`).
    """

    model_type = 'swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,            # input resolution in pixels (square)
        patch_size=4,              # edge length of each image patch
        num_channels=3,
        embed_dim=96,              # channel width after patch embedding
        depths=None,               # transformer blocks per stage; default [2, 2, 6, 2]
        num_heads=None,            # attention heads per stage; default [3, 6, 12, 24]
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.0_2,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # None sentinels avoid mutable list defaults shared across instances.
        depths = [2, 2, 6, 2] if depths is None else depths
        num_heads = [3, 6, 12, 24] if num_heads is None else num_heads
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
class __snake_case ( __lowerCAmelCase ):
    """ONNX export configuration for Swin.

    NOTE(review): in the obfuscated original both properties were named
    `_SCREAMING_SNAKE_CASE`, so the second definition shadowed the first and
    the exporter could never see the input spec. Restored to the names the
    `OnnxConfig` base-class contract requires.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the single `pixel_values` input."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
| 15 | 1 |
import datasets
from .evaluate import evaluate
lowerCAmelCase : int ="\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
lowerCAmelCase : Optional[Any] ="\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
lowerCAmelCase : str ="\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    """SQuAD v1 metric (exact match + F1) wrapping the official scoring script.

    NOTE(review): restored the `_info`/`_compute` method names — the
    obfuscated original named both `_SCREAMING_SNAKE_CASE`, so
    `datasets.Metric` could resolve neither — and re-wired the renamed
    parameters/locals that the bodies still referenced (`predictions`,
    `references`, `dataset`). The module-level docstring constants are
    likewise expected under `_CITATION`/`_DESCRIPTION`/`_KWARGS_DESCRIPTION`.
    """

    def _info(self):
        """Declare metric metadata and the expected prediction/reference schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")},
                    """references""": {
                        """id""": datasets.Value("""string"""),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string"""),
                                """answer_start""": datasets.Value("""int32"""),
                            }),
                    },
                }) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )

    def _compute(self, predictions, references):
        """Score `predictions` against `references`.

        Re-shapes the inputs into the nested SQuAD dataset layout the official
        `evaluate` script expects, then returns its {'exact_match', 'f1'} dict.
        """
        # id -> predicted answer text, as the scoring script consumes it.
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict)
        return score
| 15 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
    """Scheduler tests for `EulerDiscreteScheduler`.

    Covers config sweeps (timesteps, betas, schedule, prediction type) and
    four full denoising loops whose summed/mean outputs are pinned to known
    values.

    NOTE(review): obfuscation damage — both class attributes below share the
    name `_snake_case` (the code later reads `self.scheduler_classes` and
    `self.num_inference_steps`, so upstream had distinct names), every method
    is named `_SCREAMING_SNAKE_CASE`, and the bodies reference pre-rename
    locals (`config`, `model`, `sample`, `output`, `result_sum`,
    `result_mean`) that the rename left unbound. Restore names before running.
    """
    _snake_case = (EulerDiscreteScheduler,)
    _snake_case = 10
    def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
        """Return a default scheduler config, overridable via keyword args."""
        _lowerCamelCase : Optional[int] = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0_0_0_1,
            """beta_end""": 0.0_2,
            """beta_schedule""": """linear""",
        }
        config.update(**_UpperCamelCase)
        return config
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        """Sweep training-timestep counts."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
        """Sweep paired beta_start/beta_end values."""
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
            self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
        """Sweep supported beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
        """Sweep supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
        """Full denoising loop with default (epsilon) config; pins sum/mean."""
        _lowerCamelCase : List[Any] = self.scheduler_classes[0]
        _lowerCamelCase : str = self.get_scheduler_config()
        _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        _lowerCamelCase : str = torch.manual_seed(0)
        _lowerCamelCase : str = self.dummy_model()
        _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : int = sample.to(_UpperCamelCase)
        for i, t in enumerate(scheduler.timesteps):
            _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : Dict = output.prev_sample
        _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
        """Full denoising loop with v_prediction config; pins sum/mean."""
        _lowerCamelCase : int = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
        _lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        _lowerCamelCase : Any = torch.manual_seed(0)
        _lowerCamelCase : int = self.dummy_model()
        _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : Dict = sample.to(_UpperCamelCase)
        for i, t in enumerate(scheduler.timesteps):
            _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : Tuple = output.prev_sample
        _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
        assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
        """Full loop with timesteps placed on an explicit device; pins sum/mean."""
        _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
        _lowerCamelCase : int = self.get_scheduler_config()
        _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = torch.manual_seed(0)
        _lowerCamelCase : Tuple = self.dummy_model()
        _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        _lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
        for t in scheduler.timesteps:
            _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : List[Any] = output.prev_sample
        _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
        """Full loop with Karras sigmas enabled; pins sum/mean."""
        _lowerCamelCase : List[str] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[int] = self.get_scheduler_config()
        _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
        _lowerCamelCase : int = torch.manual_seed(0)
        _lowerCamelCase : Tuple = self.dummy_model()
        _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
        for t in scheduler.timesteps:
            _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : int = output.prev_sample
        _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
        assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 15 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase : List[str] =logging.get_logger(__name__)
# The retriever class below calls `logger.info(...)` directly, but the
# obfuscation bound the logger only to the throwaway name above — alias it
# back to the name that is actually referenced.
logger = lowerCAmelCase
class __snake_case ( __lowerCAmelCase ):
    """Distributed RagRetriever: only the main worker holds the index.

    The main process builds/queries the index; other ranks send their query
    hidden states via `dist.gather` over a dedicated gloo group and receive
    results via `dist.scatter`.

    NOTE(review): obfuscation damage — several defs repeat the parameter name
    `_UpperCamelCase` (a SyntaxError) and bodies reference pre-rename locals
    (`distributed_port`, `addrs`, `ifname`, `target_tensor`, `doc_ids`,
    `retrieved_doc_embeds`, `world_size`, `gather_list`, `n_queries`,
    `scatter_*`). Restore distinct names before running.
    """
    def __init__( self : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : str=None) ->str:
        """Forward construction to RagRetriever with init_retrieval deferred."""
        super().__init__(
            _UpperCamelCase , question_encoder_tokenizer=_UpperCamelCase , generator_tokenizer=_UpperCamelCase , index=_UpperCamelCase , init_retrieval=_UpperCamelCase , )
        # dedicated gloo group for retrieval; created in init_retrieval
        _lowerCamelCase : Optional[Any] = None
    def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : int) ->List[Any]:
        """Create the gloo retrieval group and load the index on the main worker."""
        logger.info("""initializing retrieval""")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""")
            # needs to be set manually
            _lowerCamelCase : str = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            _lowerCamelCase : Dict = str(distributed_port + 1)
            _lowerCamelCase : str = dist.new_group(ranks=_UpperCamelCase , backend="""gloo""")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""")
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
        """True on rank 0 of the retrieval group."""
        return dist.get_rank(group=self.process_group) == 0
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any]=torch.floataa) ->Any:
        """Receive this rank's slice of a tensor scattered from rank 0."""
        _lowerCamelCase : Optional[Any] = torch.empty(_UpperCamelCase , dtype=_UpperCamelCase)
        dist.scatter(_UpperCamelCase , src=0 , scatter_list=_UpperCamelCase , group=self.process_group)
        return target_tensor
    def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
        """Pick a network interface name for gloo (first one starting with 'e')."""
        _lowerCamelCase : Optional[Any] = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        _lowerCamelCase : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""")) , _UpperCamelCase)
        return ifname
    def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : int) ->Tuple[np.ndarray, List[dict]]:
        """Retrieve top docs: local path when not distributed, else gather/scatter via rank 0."""
        if not dist.is_initialized():
            _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self._main_retrieve(_UpperCamelCase , _UpperCamelCase)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_UpperCamelCase)
        # distributed training
        _lowerCamelCase : int = dist.get_world_size(group=self.process_group)
        # gather logic
        _lowerCamelCase : int = None
        if self._is_main():
            _lowerCamelCase : Optional[int] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(_UpperCamelCase)]
        dist.gather(torch.tensor(_UpperCamelCase) , dst=0 , gather_list=_UpperCamelCase , group=self.process_group)
        # scatter logic
        _lowerCamelCase : int = question_hidden_states.shape[0]
        _lowerCamelCase : Any = []
        _lowerCamelCase : Tuple = []
        if self._is_main():
            assert len(_UpperCamelCase) == world_size
            _lowerCamelCase , _lowerCamelCase : List[str] = self._main_retrieve(torch.cat(_UpperCamelCase).numpy() , _UpperCamelCase)
            _lowerCamelCase , _lowerCamelCase : Tuple = torch.tensor(_UpperCamelCase), torch.tensor(_UpperCamelCase)
            _lowerCamelCase : List[Any] = self._chunk_tensor(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : str = self._chunk_tensor(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : int = self._scattered(_UpperCamelCase , [n_queries, n_docs] , target_type=torch.intaa)
        _lowerCamelCase : Dict = self._scattered(_UpperCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_UpperCamelCase)
| 15 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Tokenizer resource tables. The obfuscated original rebound the single name
# `lowerCAmelCase` four times (each assignment clobbering the previous value)
# while the tokenizer class below references `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`;
# restored to the names actually used.
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
# Preserve the final value of the original (clobbered) binding for any reader.
lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class __snake_case ( __lowerCAmelCase ):
    """Character-level tokenizer for MGP-STR.

    Every character of the input text is its own token, looked up in a JSON
    vocab file.

    NOTE(review): the obfuscated original gave all six methods the same name
    (`_SCREAMING_SNAKE_CASE`, each def shadowing the previous), repeated the
    parameter name `_UpperCamelCase` (a SyntaxError), and left bodies
    referencing pre-rename locals (`char_tokens`, `vocab_file`,
    `filename_prefix`). Restored to the `PreTrainedTokenizer` contract names
    the bodies already point at.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        """Load the character vocab from `vocab_file` and build the reverse map."""
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="""utf-8""") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # id -> token reverse mapping, used for decoding
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Base vocab merged with any added tokens."""
        return dict(self.vocab , **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split `text` into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a character to its id, falling back to the unk token's id."""
        return self.vocab.get(token , self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its character (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocab JSON into `save_directory`; return the file path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        with open(vocab_file , """w""" , encoding="""utf-8""") as f:
            # sort keys for stable, diffable files; keep non-ASCII chars readable
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False) + """\n""")
        return (vocab_file,)
| 15 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
    """Slow integration checks for XLM-RoBERTa base/large.

    Each test runs one forward pass on a fixed input-id tensor and pins the
    last-hidden-state shape plus a slice of expected values.

    NOTE(review): obfuscation artifacts — the bodies reference `model` and
    `output`, which the rename replaced with throwaway targets, and pass the
    parameter name `_UpperCamelCase` where distinct locals (input ids,
    expected shape, expected slice) were intended. Restore before running.
    """
    @slow
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
        """xlm-roberta-base: forward pass, check shape and last-dim slice."""
        _lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        _lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        _lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
        _lowerCamelCase : str = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            _lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , _UpperCamelCase)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """xlm-roberta-large: forward pass, check shape and last-dim slice."""
        _lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
        _lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        _lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
        _lowerCamelCase : Union[str, Any] = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            _lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , _UpperCamelCase)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
| 15 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
    """Tests for `transformers.utils.backbone_utils` helpers.

    Covers output-feature/index alignment defaults, validation failures in
    `verify_out_features_out_indices`, and `BackboneMixin` property updates.

    NOTE(review): obfuscation artifact — many call sites pass
    `_UpperCamelCase`, which is unbound in these method scopes (the methods
    take only `self`); upstream passed `None` or the stage-name list there.
    Restore the literals before running.
    """
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
        """Alignment helper: defaults, and features/indices derived from each other."""
        _lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
        # Defaults to last layer if both are None
        _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""c"""])
        self.assertEqual(_UpperCamelCase , [2])
        # Out indices set to match out features
        _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [0, 2])
        # Out features set to match out indices
        _lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [0, 2])
        # Out features selected from negative indices
        _lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [-3, -1])
    def _SCREAMING_SNAKE_CASE ( self : int) ->int:
        """Validation helper: each malformed input raises; valid input passes."""
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
        # Out features must be a list
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
        # Out features must be a subset of stage names
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
        # Out indices must be a list or tuple
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
        # Out indices must be a subset of stage names
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
        # Out features and out indices must be the same length
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
        # Out features should match out indices
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
        # Out features and out indices should be in order
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
    def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
        """BackboneMixin: out_features/out_indices stay in sync when either is set."""
        _lowerCamelCase : int = BackboneMixin()
        _lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
        _lowerCamelCase : Tuple = ["""a""", """c"""]
        _lowerCamelCase : List[Any] = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [0, 2])
        # Check out features and indices are updated correctly
        _lowerCamelCase : str = ["""a""", """b"""]
        self.assertEqual(backbone.out_features , ["""a""", """b"""])
        self.assertEqual(backbone.out_indices , [0, 1])
        _lowerCamelCase : Optional[int] = [-3, -1]
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
    """Fixture holder for the DPT image-processor tests.

    Stores the processor hyper-parameters and exposes them as the kwargs dict
    consumed by `DPTImageProcessor` (via `prepare_image_processor_dict`,
    which the test suite below calls by that name).

    NOTE(review): the obfuscated original repeated the parameter name
    `_UpperCamelCase` (a SyntaxError) while the body still referenced the
    pre-rename names (`parent`, `batch_size`, ...); restored to those names.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,   # defaults to [0.5, 0.5, 0.5]
        image_std=None,    # defaults to [0.5, 0.5, 0.5]
    ):
        # None sentinels avoid mutable (dict/list) defaults shared across calls.
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        self.image_std = [0.5, 0.5, 0.5] if image_std is None else image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = DPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = DPTImageProcessingTester(self)
@property
def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
"""simple docstring"""
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCamelCase , """image_mean"""))
self.assertTrue(hasattr(_UpperCamelCase , """image_std"""))
self.assertTrue(hasattr(_UpperCamelCase , """do_normalize"""))
self.assertTrue(hasattr(_UpperCamelCase , """do_resize"""))
self.assertTrue(hasattr(_UpperCamelCase , """size"""))
def _SCREAMING_SNAKE_CASE ( self : int) ->Dict:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image)
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_lowerCamelCase : Optional[int] = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray)
# Test not batched input
_lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_lowerCamelCase : List[str] = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor)
# Test not batched input
_lowerCamelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_lowerCamelCase : str = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
import math
def A__ ( __A ):
    """Return ``True`` when ``__A`` is a prime number, ``False`` otherwise.

    Fixes two defects in the previous body: ``isinstance(__A, __A)`` raised
    TypeError on every call (the second argument must be a type), and the rest
    of the body referenced an undefined ``number`` instead of the parameter.

    Raises:
        AssertionError: if ``__A`` is not a non-negative ``int``.
    """
    assert isinstance(__A, int) and (
        __A >= 0
    ), "'number' must been an int and positive"
    if 1 < __A < 4:
        # 2 and 3 are primes
        return True
    elif __A < 2 or not __A % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    # Trial division by odd candidates up to sqrt(__A).
    odd_numbers = range(3, int(math.sqrt(__A) + 1), 2)
    return not any(not __A % i for i in odd_numbers)


# Stable alias so the sibling prime-search helper can call this primality
# test even though later definitions in this file rebind the name ``A__``.
is_prime = A__
def A__ ( value , factor=1 , **kwargs ):
    """Return the first prime reached scanning from ``factor * value``.

    Scans upward by default, or downward when ``kwargs['desc'] is True``.
    If the starting point is itself prime, restarts from ``value + 1`` so
    the result is always a *new* prime.

    Fixes: the previous signature declared ``__A`` three times (SyntaxError),
    the ``while`` condition tested a value that never changed (non-termination),
    and ``first_value_val`` was never assigned. Parameter names restored from
    the body's own references.
    """
    value = factor * value
    first_value_val = value
    # NOTE(review): ``is_prime`` is expected to be the primality helper defined
    # just above in this file — confirm it is bound under that name at call time.
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value


# Stable alias used by the recursive call above; survives later rebinding
# of ``A__`` by other definitions in this file.
next_prime = A__
| 15 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __snake_case ( PipelineTool ):
    """Tool answering a natural-language question about a document image with a
    Donut (VisionEncoderDecoder) checkpoint fine-tuned on DocVQA.

    NOTE(review): restored the ``PipelineTool`` API — the base class, the class
    attributes (previously all named ``_snake_case``, shadowing each other) and
    the hook names ``encode``/``forward``/``decode`` (previously all collapsed
    to one name). Confirm against callers outside this file.
    """

    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['image', 'text']
    outputs = ['text']

    def __init__( self , *args , **kwargs ) ->None:
        """Fail fast when Pillow is missing; the tool consumes document images."""
        # Fixes a SyntaxError: the previous signature declared the same
        # parameter name for both ``*args`` and ``**kwargs``.
        if not is_vision_available():
            raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""")
        super().__init__(*args, **kwargs)

    def encode( self , document : "Image" , question : str ):
        """Build the Donut decoder prompt ids and the document pixel values."""
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        prompt = task_prompt.replace("""{user_input}""" , question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="""pt""").input_ids
        pixel_values = self.pre_processor(document , return_tensors="""pt""").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward( self , inputs ):
        """Greedy-generate (num_beams=1) the answer token sequence."""
        return self.model.generate(
            inputs["""pixel_values"""].to(self.device) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences

    def decode( self , outputs ):
        """Strip special tokens and the task prompt, then extract the answer field."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , """""")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , """""")
        sequence = re.sub(R"""<.*?>""" , """""" , sequence , count=1).strip() # remove first task start token
        # Bug fix: the Donut processor method is ``token2json`` (was ``tokenajson``).
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __snake_case ( Pipeline ):
    """Image-classification pipeline: assigns labels (with scores) to images.

    NOTE(review): restored the standard ``Pipeline`` hook names
    (``_sanitize_parameters``/``preprocess``/``_forward``/``postprocess`` —
    previously all collapsed to one shadowing name), the dropped intermediate
    bindings, the ``Pipeline`` base and the ``PIPELINE_INIT_ARGS`` decorator
    argument (both imported at the top of this file).
    """

    def __init__( self , *args , **kwargs ) ->Tuple:
        # Fixes a SyntaxError: the previous signature declared the same
        # parameter name for both ``*args`` and ``**kwargs``.
        super().__init__(*args, **kwargs)
        requires_backends(self , """vision""")
        # Restrict to image-classification heads for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters( self , top_k=None ) ->Optional[int]:
        """Route ``top_k`` to postprocess; preprocess/forward take no knobs."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params

    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ) ->Dict:
        """Classify image(s) given as local path, URL, or ``PIL.Image``."""
        return super().__call__(images , **kwargs)

    def preprocess( self , image ) ->str:
        # Bug fix: feed the *loaded* image to the processor — the previous body
        # discarded the ``load_image`` result and forwarded the raw input.
        image = load_image(image)
        model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        return model_inputs

    def _forward( self , model_inputs ) ->List[str]:
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess( self , model_outputs , top_k=5 ) ->str:
        """Convert logits to the ``top_k`` best ``{"score", "label"}`` dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores , ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1)[0]
            topk = tf.math.top_k(probs , k=top_k)
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        # Bug fix: the config attribute is ``id2label`` (was ``idalabel``).
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
| 15 | 1 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
class __snake_case ( BaseImageProcessor ):
    """Image processor applying optional rescale plus symmetric bottom/right
    padding of height/width up to the next multiple of ``pad_size``.

    NOTE(review): parameter and attribute names restored — the previous
    ``__init__``/``preprocess`` declared every parameter with the same name
    (SyntaxError) and never assigned ``self.do_rescale`` etc. Base class set
    to ``BaseImageProcessor`` (imported at the top of this file).
    """

    model_input_names = ['pixel_values']

    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ) ->None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) ->np.ndarray:
        """Delegate to the module-level ``rescale`` transform."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs)

    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ) ->Union[str, Any]:
        """Symmetrically pad so both spatial dims become multiples of ``size``.

        Note: a dimension that is already a multiple still gains a full extra
        ``size`` block — behavior kept from the original.
        """
        old_height , old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format)

    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) ->Optional[int]:
        """Run the configured rescale/pad pipeline over one image or a batch."""
        # Per-call overrides fall back to the configured defaults.
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    """Tests for ``MgpstrProcessor`` (char tokenizer + ViT image processor).

    NOTE(review): restored from the standard transformers processor-test
    template — the previous version named every method identically (so only
    the last ever ran) and bound every intermediate result to a throwaway
    name before referencing the intended one (NameError). Confirm the test
    names against the upstream file.
    """

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        """Write a char vocab and a ViT image-processor config into a temp dir."""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return one random PIL image (HWC uint8)."""
        # Bug fix: ``np.uinta`` does not exist; the dtype is ``np.uint8``.
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = """test"""
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = """test"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """labels"""])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(""" """, """""") for seq in decoded_tok]
        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 5_0257)
        wp_input = torch.randn(1, 27, 3_0522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15 | 1 |
def A__ ( weights , values , number_of_items , max_weight , index ):
    """Solve the 0/1 knapsack problem recursively.

    Args:
        weights: per-item weights.
        values: per-item values (parallel to ``weights``).
        number_of_items: total item count (recursion stop).
        max_weight: remaining capacity.
        index: index of the item currently being considered.

    Returns:
        Maximum total value achievable from items ``index..`` within capacity.

    Fixes: the previous signature declared ``__A`` five times (SyntaxError);
    parameter names restored from the body's own references.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take it, when it fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


# Alias matching the recursive call sites; survives later rebinding of ``A__``
# by other definitions in this file.
knapsack = A__


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
    """Parse the TPU launcher's command line.

    Returns:
        argparse.Namespace with ``num_cores`` (int), ``training_script`` (str)
        and ``training_script_args`` (everything after the script path).

    Fixes: ``type=`` and ``nargs=`` previously referenced an undefined name;
    restored to ``int``/``str`` and ``REMAINDER`` (imported at the top of
    this file).
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program: swallow every remaining argument
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()


# Alias matching the call in ``main`` below; survives later rebinding of ``A__``.
parse_args = A__
def A__ ( ):
    """Spawn the training script's ``_mp_fn`` on ``num_cores`` TPU processes.

    Fixes: intermediate results were bound to a throwaway name (NameError at
    ``script_fpath``/``mod_name``) and the patched argv was never actually
    assigned to ``sys.argv``; the ``__main__`` guard called an undefined
    ``main``.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the target script sees its own arguments plus the
    # TPU core count flag it expects.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores)


# Alias so the entry-point guard resolves; survives rebinding of ``A__``.
main = A__


if __name__ == "__main__":
    main()
| 15 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    """RoBERTa tokenizer tests (slow + fast), driven by ``TokenizerTesterMixin``.

    NOTE(review): the mixin base (imported at the top of this file) and the
    attribute names the mixin reads are restored — the previous version
    inherited from an undefined name and assigned every attribute to the same
    ``_snake_case`` identifier, each shadowing the last.
    """

    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'cls_token': '<s>'}
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowerCamelCase : List[str] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCamelCase : Union[str, Any] = {"""unk_token""": """<unk>"""}
_lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(_UpperCamelCase))
def _SCREAMING_SNAKE_CASE ( self : int , **_UpperCamelCase : Optional[Any]) ->Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , **_UpperCamelCase : Tuple) ->Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = """lower newer"""
_lowerCamelCase : Dict = """lower newer"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map)
_lowerCamelCase : List[Any] = """lower newer"""
_lowerCamelCase : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCamelCase : Any = tokenizer.tokenize(_UpperCamelCase) # , add_prefix_space=True)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase) , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=_UpperCamelCase) , [0, 3_1414, 232, 328, 2])
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=_UpperCamelCase) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : int = self.tokenizer_class.from_pretrained("""roberta-base""")
_lowerCamelCase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_UpperCamelCase)
_lowerCamelCase : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer.encode(
"""sequence builders""" , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase)
_lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = """Encode this sequence."""
_lowerCamelCase : Any = tokenizer.byte_encoder[""" """.encode("""utf-8""")[0]]
# Testing encoder arguments
_lowerCamelCase : Any = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase)
_lowerCamelCase : int = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase)
_lowerCamelCase : int = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
tokenizer.add_special_tokens({"""bos_token""": """<s>"""})
_lowerCamelCase : Optional[Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase)
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(encoded[1])[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase)
# Testing spaces after special tokens
_lowerCamelCase : Optional[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase)}) # mask token has a left space
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(_UpperCamelCase)
_lowerCamelCase : str = """Encode <mask> sequence"""
_lowerCamelCase : Union[str, Any] = """Encode <mask>sequence"""
_lowerCamelCase : str = tokenizer.encode(_UpperCamelCase)
_lowerCamelCase : Optional[int] = encoded.index(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = tokenizer.encode(_UpperCamelCase)
_lowerCamelCase : Any = encoded.index(_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
    """No-op override; presumably disables an inherited tokenizer check — TODO confirm intent."""
    pass

def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
    """Compare slow vs fast tokenizer on a sentence containing `<mask>`.

    NOTE(review): local and argument names in this file look machine-mangled
    (`_lowerCamelCase`, `_UpperCamelCase`); the `_UpperCamelCase` arguments below do
    not resolve to meaningful values as written — compare against the upstream test.
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
            # Load the fast (Rust) and slow (Python) tokenizers for the same checkpoint.
            _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase)
            _lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase)
            _lowerCamelCase : str = """A, <mask> AllenNLP sentence."""
            _lowerCamelCase : int = tokenizer_r.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase)
            _lowerCamelCase : Union[str, Any] = tokenizer_p.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase)
            # token_type_ids should put 0 everywhere
            self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
            # attention_mask should put 1 everywhere, so sum over length should be 1
            self.assertEqual(
                sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
            _lowerCamelCase : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
            _lowerCamelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
            # Rust correctly handles the space before the mask while python doesnt
            self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
            self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
            self.assertSequenceEqual(
                _UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
            self.assertSequenceEqual(
                _UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])

def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
    """Check that `add_prefix_space` / `trim_offsets` flags survive (de)serialization
    of the fast tokenizer's pre-tokenizer and post-processor state."""
    for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2):
        _lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
            self.tmpdirname , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase)
        # The backend components expose their config as a JSON-serializable state.
        _lowerCamelCase : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
        _lowerCamelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
        self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , _UpperCamelCase)
        self.assertEqual(post_processor_state["""add_prefix_space"""] , _UpperCamelCase)
        self.assertEqual(post_processor_state["""trim_offsets"""] , _UpperCamelCase)

def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
    """Check offset mappings for every add_prefix_space / trim_offsets combination,
    with and without a leading space before the text."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
            _lowerCamelCase : int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
            _lowerCamelCase : str = F"""{text_of_1_token} {text_of_1_token}"""
            _lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
                _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase)
            _lowerCamelCase : List[Any] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase)
            self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase)))
            self.assertEqual(
                encoding.offset_mapping[1] , (len(_UpperCamelCase) + 1, len(_UpperCamelCase) + 1 + len(_UpperCamelCase)) , )
            _lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase)
            _lowerCamelCase : Optional[Any] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase)
            self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase)))
            self.assertEqual(
                encoding.offset_mapping[1] , (len(_UpperCamelCase) + 1, len(_UpperCamelCase) + 1 + len(_UpperCamelCase)) , )
            # With trim_offsets=False the second token's span starts at the space.
            _lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase)
            _lowerCamelCase : Optional[int] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase)
            self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase)))
            self.assertEqual(
                encoding.offset_mapping[1] , (len(_UpperCamelCase), len(_UpperCamelCase) + 1 + len(_UpperCamelCase)) , )
            _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase)
            _lowerCamelCase : Optional[int] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase)
            self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase)))
            self.assertEqual(
                encoding.offset_mapping[1] , (len(_UpperCamelCase), len(_UpperCamelCase) + 1 + len(_UpperCamelCase)) , )
            # NOTE(review): `text` is not defined at this point as written — upstream
            # this is f" {text_of_1_token}" (leading-space variant); confirm.
            _lowerCamelCase : Union[str, Any] = F""" {text}"""
            # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
            # )
            # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
            # self.assertEqual(
            #     encoding.offset_mapping[1],
            #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
            # )
            _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase)
            _lowerCamelCase : List[str] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase)
            self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase)))
            self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(_UpperCamelCase) + 1, 1 + len(_UpperCamelCase) + 1 + len(_UpperCamelCase)) , )
            _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase)
            _lowerCamelCase : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase)
            self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase)))
            self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(_UpperCamelCase), 1 + len(_UpperCamelCase) + 1 + len(_UpperCamelCase)) , )
            _lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                _UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase)
            _lowerCamelCase : Tuple = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase)
            self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase)))
            self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(_UpperCamelCase), 1 + len(_UpperCamelCase) + 1 + len(_UpperCamelCase)) , )
def naive_cut_rod_recursive(n, prices):
    """Exponential-time recursive solution to the rod-cutting problem.

    n: rod length; prices[i] is the price of a piece of length i + 1.
    Returns the maximum obtainable revenue; raises ValueError via _enforce_args
    for invalid arguments.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        # Best of: sell a first piece of length i plus the best cut of the rest.
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n, prices):
    """Memoized (top-down dynamic programming) rod cutting; O(n^2) time."""
    _enforce_args(n, prices)
    # max_rev[k] caches the best revenue for a rod of length k; -inf = not computed.
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n, prices, max_rev):
    """Recursive helper for top_down_cut_rod; fills the max_rev memo table."""
    if max_rev[n] >= 0:
        # Already computed (revenues are non-negative once memoized).
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n, prices):
    """Iterative (bottom-up dynamic programming) rod cutting; O(n^2) time."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n, prices):
    """Validate arguments shared by all solvers; raises ValueError on bad input."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    """Cross-check the three implementations on a known example."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 15 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        # Hard requirement: a wrong version aborts import with a clear error.
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    """Check that the installed version of *pkg* satisfies the pinned requirement.

    pkg: key into the `deps` version table imported above.
    hint: optional message appended to the error raised on a version mismatch.
    """
    # NOTE(review): name restored to the conventional public API; the original
    # `def A__(__A, __A=None)` had duplicate parameter names (a SyntaxError).
    require_version(deps[pkg], hint)
| 15 | from __future__ import annotations
class XORCipher:
    """Symmetric XOR cipher.

    Every method takes an explicit *key*; a falsy key (0) falls back to the key
    given at construction time, and failing that to 1. NOTE(review): the class
    and method names are restored from the usage examples in the comments below
    this class; the original had every method bound to one mangled name.
    """

    def __init__(self, key: int = 0) -> None:
        """Remember *key* as the default used when a per-call key is falsy."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt *content*, returning one encrypted character per list entry."""
        assert isinstance(content, str) and isinstance(key, int)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        # NOTE(review): upstream uses modulo 255 (not 256); kept for behavior parity.
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt *content* (XOR is its own inverse), one character per entry."""
        assert isinstance(content, str) and isinstance(key, int)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Encrypt *content* and return the result as a single string."""
        assert isinstance(content, str) and isinstance(key, int)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Decrypt *content* and return the result as a single string."""
        assert isinstance(content, str) and isinstance(key, int)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt *file* line by line into 'encrypt.out'; returns True on success."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt *file* line by line into 'decrypt.out'; returns True on success."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 1 |
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of perimeters of almost-equilateral triangles.

    An almost-equilateral triangle has sides (a, a, a +/- 1) with integer area.
    Their side lengths follow a Pell-like recurrence; this loop walks it,
    accumulating every perimeter that does not exceed *max_perimeter*.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a + 1) and (a, a, a - 1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 15 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    """Dataset reader that builds a `datasets` Dataset from plain-text files.

    NOTE(review): the original class had duplicate parameter names (a SyntaxError),
    never stored the builder on `self`, and subclassed an undefined mangled name;
    the base is restored to the `AbstractDatasetReader` imported above.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to the {split_name: paths} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset: streaming iterator or fully prepared on disk."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 15 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester specialized for MobileNetV1 (name restored from its use below)."""

    def create_and_test_config_common_properties(self):
        # MobileNetV1 configs lack the usual hidden_size/num_attention_heads, so
        # override the common-property check with attributes the config does have.
        # NOTE(review): method name presumed from the ConfigTester API — confirm.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds tiny MobileNetV1 configs and inputs for the unit tests in this file.

    NOTE(review): attribute/parameter names restored from the `self.*` reads in
    the methods below; the original had duplicate mangled parameter names, which
    is a SyntaxError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The effective hidden size shrinks with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values plus (optionally) classification / pixel labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Tiny MobileNetV1 config mirroring the tester hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Forward the base model and check the output feature-map shape."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Forward the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Unit tests for the MobileNetV1 model classes.

    NOTE(review): class/base names are machine-mangled; the bases are presumably
    ModelTesterMixin and PipelineTesterMixin (imported above) — duplicate
    identical bases as written would raise at class creation. Confirm upstream.
    """
    # NOTE(review): these `_snake_case` assignments repeatedly rebind one name; they
    # were presumably distinct attributes (all_model_classes, pipeline_model_mapping,
    # and several test_* feature flags) — confirm against the upstream test file.
    _snake_case = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    _snake_case = (
        {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def _SCREAMING_SNAKE_CASE ( self : str) ->int:
        """Create the model tester and config tester (presumably `setUp`)."""
        _lowerCamelCase : Dict = MobileNetVaModelTester(self)
        _lowerCamelCase : List[Any] = MobileNetVaConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
        """Run the shared configuration tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""")
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
        """Skipped: MobileNetV1 has no input embeddings."""
        pass

    @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""")
    def _SCREAMING_SNAKE_CASE ( self : str) ->Dict:
        """Skipped: no input/output embedding support."""
        pass

    @unittest.skip(reason="""MobileNetV1 does not output attentions""")
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
        """Skipped: the model produces no attention outputs."""
        pass

    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
        """Check that every model's forward signature starts with `pixel_values`."""
        _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : str = model_class(_UpperCamelCase)
            _lowerCamelCase : str = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : int = [*signature.parameters.keys()]
            _lowerCamelCase : Dict = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
        """Exercise the base model forward pass."""
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
        """Check hidden-state outputs: MobileNetV1 exposes 26 feature maps."""
        # NOTE(review): the duplicate `_UpperCamelCase` parameters below are a
        # mangling artifact (originally inputs_dict/config/model_class) — the
        # inner def cannot parse as written; confirm against the upstream file.
        def check_hidden_states_output(_UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any]):
            _lowerCamelCase : Optional[int] = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))
            _lowerCamelCase : Dict = outputs.hidden_states
            # One hidden state per MobileNetV1 stage.
            _lowerCamelCase : Any = 26
            self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase)
        _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : int = True
            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : List[str] = True
            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
        """Exercise the image-classification head."""
        _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase)

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Optional[int] = MobileNetVaModel.from_pretrained(_UpperCamelCase)
            self.assertIsNotNone(_UpperCamelCase)
def prepare_img():
    """Load the COCO sample image used by the integration test below.

    Name restored: the integration test calls `prepare_img()` while the def
    was mangled to `A__`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    """Integration test running the real google/mobilenet_v1_1.0_224 checkpoint.

    NOTE(review): the class name is machine-mangled; arguments written as
    `_UpperCamelCase` below do not resolve as-is — confirm against upstream.
    """

    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
        """Image processor for the checkpoint (None when vision deps are absent)."""
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""") if is_vision_available() else None
        )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """Verify the pretrained classifier's logits on the COCO cats image."""
        _lowerCamelCase : Optional[int] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""").to(_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = self.default_image_processor
        _lowerCamelCase : str = prepare_img()
        _lowerCamelCase : Optional[int] = image_processor(images=_UpperCamelCase , return_tensors="""pt""").to(_UpperCamelCase)
        # forward pass
        with torch.no_grad():
            _lowerCamelCase : Optional[Any] = model(**_UpperCamelCase)
        # verify the logits
        _lowerCamelCase : Optional[Any] = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape , _UpperCamelCase)
        # Reference values recorded from the released checkpoint.
        _lowerCamelCase : List[str] = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5]).to(_UpperCamelCase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4))
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    """Sort a mutable sequence of 0/1/2 values with Dijkstra's three-way partition.

    Sorts in place and returns the sequence (a fresh list for trivial inputs);
    raises ValueError if any element is not one of `colors`. Single O(n) pass,
    O(1) extra space.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # Red: swap into the growing prefix of reds.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            # White: already in the middle region.
            mid += 1
        elif sequence[mid] == colors[2]:
            # Blue: swap into the growing suffix of blues; do not advance mid,
            # the swapped-in element is still unclassified.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of 0/1/2 values from stdin, sort it, print it.
    # Fix: the original bound both values to a mangled placeholder name, so the
    # reads of `user_input` and `unsorted` below raised NameError.
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 15 | 1 |
def A__(input_str: str) -> bool:
    """Return True if every character in *input_str* is distinct.

    Uses an arbitrarily large int as a bitset keyed by each character's code
    point, so the check is O(len(input_str)). NOTE(review): the original public
    name was lost in mangling; kept as `A__` since nothing here calls it.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 15 | from __future__ import annotations
# Accumulates one entry per complete placement found by solve().
solution = []


def is_safe(board, row, column):
    """Return True if a queen at (row, column) is not attacked.

    Checks the full row and column plus both upward diagonals; queens are only
    ever placed in rows above the current one during the search.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    """Place queens row by row via backtracking; record and print each solution."""
    if row >= len(board):
        # All rows filled: record the board (appended by reference) and print it.
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False


def printboard(board):
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
# Fix: `n` and `board` were bound to mangled placeholder names, so the reads
# below raised NameError before the solver could run.
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make kernels/seeding deterministic so the pipeline test outputs are reproducible.
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = KandinskyVaaControlnetImgaImgPipeline
_snake_case = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
_snake_case = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
_snake_case = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_snake_case = False
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
    """Tiny embedder hidden size used throughout the dummy components."""
    # NOTE(review): property names are mangled; reads below suggest this is
    # `text_embedder_hidden_size` — confirm against the upstream test file.
    return 32

@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
    """Timestep-embedding input dimension."""
    # NOTE(review): presumably `time_input_dim` (read as self.time_input_dim below).
    return 32

@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->int:
    """First UNet block width, tied to the timestep-embedding input dim."""
    # NOTE(review): presumably `block_out_channels_a`.
    return self.time_input_dim

@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]:
    """Timestep-embedding projection dimension (4x the input dim)."""
    return self.time_input_dim * 4

@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->str:
    """Cross-attention dimension used by the dummy UNet."""
    # NOTE(review): presumably `cross_attention_dim`.
    return 100
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]:
    """Build a tiny UNet2DConditionModel configured for image+hint conditioning."""
    torch.manual_seed(0)
    _lowerCamelCase : Optional[Any] = {
        """in_channels""": 8,
        # Out channels is double in channels because predicts mean and variance
        """out_channels""": 8,
        """addition_embed_type""": """image_hint""",
        """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
        """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
        """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
        """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
        """layers_per_block""": 1,
        """encoder_hid_dim""": self.text_embedder_hidden_size,
        """encoder_hid_dim_type""": """image_proj""",
        """cross_attention_dim""": self.cross_attention_dim,
        """attention_head_dim""": 4,
        """resnet_time_scale_shift""": """scale_shift""",
        """class_embed_type""": None,
    }
    # NOTE(review): locals are mangled — `model` below is read but never bound as
    # written (originally the UNetaDConditionModel result); confirm upstream.
    _lowerCamelCase : List[Any] = UNetaDConditionModel(**_UpperCamelCase)
    return model

@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]:
    """Constructor kwargs for a tiny VQModel (the movq image decoder)."""
    return {
        "block_out_channels": [32, 32, 64, 64],
        "down_block_types": [
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "AttnDownEncoderBlock2D",
        ],
        "in_channels": 3,
        "latent_channels": 4,
        "layers_per_block": 1,
        "norm_num_groups": 8,
        "norm_type": "spatial",
        "num_vq_embeddings": 12,
        "out_channels": 3,
        "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
        "vq_embed_dim": 4,
    }

@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
    """Instantiate the tiny VQModel with a fixed seed."""
    torch.manual_seed(0)
    # NOTE(review): same mangling issue — `model` is read but bound to a
    # placeholder name as written.
    _lowerCamelCase : List[Any] = VQModel(**self.dummy_movq_kwargs)
    return model
def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.dummy_unet
_lowerCamelCase : Dict = self.dummy_movq
_lowerCamelCase : List[str] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCamelCase : Dict = DDIMScheduler(**_UpperCamelCase)
_lowerCamelCase : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
    """Build the keyword arguments for one pipeline call on `device`.

    Fixes a SyntaxError in the mangled original (both parameters were named
    `_UpperCamelCase`) and restores the locals the inputs dict refers to.

    Args:
        device: torch device string (e.g. "cpu", "mps").
        seed: RNG seed used for all random tensors and the generator.
    """
    image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
    negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
        device)
    # create init_image
    image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
    image = image.cpu().permute(0, 2, 3, 1)[0]
    # np.uint8 restored from mangled `np.uinta` (digits were stripped by the rewrite)
    init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
    # create hint
    hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
    if str(device).startswith("mps"):
        # mps does not support device-local generators
        generator = torch.manual_seed(seed)
    else:
        generator = torch.Generator(device=device).manual_seed(seed)
    inputs = {
        "image": init_image,
        "image_embeds": image_embeds,
        "negative_image_embeds": negative_image_embeds,
        "hint": hint,
        "generator": generator,
        "height": 64,
        "width": 64,
        "num_inference_steps": 10,
        "guidance_scale": 7.0,
        "strength": 0.2,
        "output_type": "np",
    }
    return inputs
def _SCREAMING_SNAKE_CASE(self):
    """Fast CPU smoke test: run the pipeline twice (dict and tuple outputs) and
    check the output image slice against reference values.

    Restores the collapsed locals and the undefined `_UpperCamelCase`
    references (`disable=None`, `return_dict=False`) from the evident dataflow.
    """
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe = pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    output = pipe(**self.get_dummy_inputs(device))
    image = output.images
    image_from_tuple = pipe(
        **self.get_dummy_inputs(device), return_dict=False,
    )[0]
    image_slice = image[0, -3:, -3:, -1]
    image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
    assert image.shape == (1, 64, 64, 3)
    expected_slice = np.array(
        [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736])
    assert (
        np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
    assert (
        np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __snake_case(unittest.TestCase):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet img2img pipeline."""

    def tearDown(self):
        # Name restored from the super().tearDown() call inside the mangled method.
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        """Run prior + controlnet img2img end-to-end and compare against a stored image."""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        # torch.float16 restored from mangled `torch.floataa`; the fp16 reference
        # artifact above grounds the half-precision dtype.
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        # NOTE(review): `torch_device` is assumed to be imported at the top of
        # this file (standard transformers/diffusers test helper) — confirm.
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="", ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels per GLUE task; STS-B is a regression task (1 output).
# Name restored from the `GLUE_TASKS_NUM_LABELS` lookups inside the conversion
# function below (the mangled original bound the dict to a throwaway name).
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def A__(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model and save it.

    Fixes a SyntaxError in the mangled original (all four parameters were named
    `__A`) and restores the locals (`config`, `model`, `finetuning_task`) the
    body already referenced.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        bert_config_file: JSON config file describing the XLNet architecture.
        pytorch_dump_folder_path: output folder for weights + config.
        finetuning_task: optional task name selecting the model head.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


# Backward-compatible alias: the __main__ block below calls this name.
convert_xlnet_checkpoint_to_pytorch = A__
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/output paths and run the conversion.
    # Restores `parser`/`args`, which the mangled original referenced but never bound.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __snake_case(__lowerCAmelCase):
    """Helper that builds tiny SqueezeBert configs/inputs and checks each head's output shapes.

    Fixes the mangled original: all 27 `__init__` parameters shared one name
    (a SyntaxError), and every method body assigned to a single collapsed
    local. Parameter/method names are restored from the right-hand-side
    references the mangler left intact and from the call sites in the model
    test class below.
    """

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Create a config plus random ids/masks/labels sized from the tester fields."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )

    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq) for the MC head.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        # NOTE(review): method name follows the ModelTesterMixin convention —
        # confirm against the mixin this class's consumers inherit from.
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Backward-compatible alias: the test class below instantiates this name.
SqueezeBertModelTester = __snake_case
@require_torch
class __snake_case(__lowerCAmelCase, __lowerCAmelCase, unittest.TestCase):
    """SqueezeBert model test suite driven by SqueezeBertModelTester.

    The mangled original assigned five distinct class attributes to one name
    (`_snake_case`, each shadowing the last) and gave every test method the
    same name. Attribute/method names below follow the transformers test
    conventions consumed by the inherited mixins — NOTE(review): confirm
    against the mixin definitions, which are outside this file.
    """

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        # Fixtures used by every test method below (the original never bound them).
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case(unittest.TestCase):
    """Integration test: pinned logits of the published squeezebert-mnli checkpoint."""

    @slow
    def test_inference_classification_head(self):
        # Locals restored from the mangled original, where every assignment
        # overwrote the same name and the subsequent lines referenced
        # never-bound identifiers.
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
def A__(input_str):
    """Return True if every character in `input_str` is unique, using a bitmap.

    Each character's code point toggles one bit in `bitmap`; seeing a bit that
    is already set means the character repeated. Restores the parameter and
    local names the mangled body already referenced (and drops the stray
    file-boundary junk that was fused onto the `def` line).

    >>> A__("abc")
    True
    >>> A__("aba")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
# Run this module's doctests when the file is executed directly (no-op on import).
if __name__ == "__main__":
# NOTE(review): indentation appears to have been stripped by an automated
# rewrite; upstream these two lines are indented under the guard.
import doctest
doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
# Map of published I-BERT checkpoints to their hosted config.json files.
# NOTE(review): the constant's name was mangled by an automated rewrite;
# upstream this is `IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before renaming.
lowerCAmelCase : List[Any] ={
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __snake_case(__lowerCAmelCase):
    """Configuration for I-BERT (integer-only BERT) models.

    Fixes a SyntaxError in the mangled original: every `__init__` parameter
    was named `_UpperCamelCase`. Parameter names are restored from the
    right-hand-side references the mangler left in the body; attribute
    assignments now actually land on `self`.
    """

    # NOTE(review): upstream this attribute is `model_type` — name was mangled.
    _snake_case = 'ibert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class __snake_case(__lowerCAmelCase):
    """ONNX export configuration for I-BERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph's inputs.

        Restores the `dynamic_axis` local the mangled original referenced but
        never bound. NOTE(review): property name follows the OnnxConfig API —
        the base class is outside this file.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case(unittest.TestCase):
    """Integration tests: pinned hidden states of the published XLM-R checkpoints.

    The mangled original gave both test methods the same name (the second
    silently shadowed the first) and referenced never-bound locals; names are
    restored from the evident dataflow.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def A__(protein):
    """Add atom14<->atom37 index maps and existence masks to `protein`, in place.

    Restores the collapsed locals of the mangled original: digit-stripping
    merged what were two distinct per-restype tables (atom14->atom37 and
    atom37->atom14) into one name, and the body referenced a `protein`
    parameter and `atom_names` local that were never bound.
    NOTE(review): the output dictionary keys and the `torch.int32`/`float32`
    dtypes are reconstructed from the upstream openfold utilities (the mangled
    text had nonexistent `torch.intaa`/`torch.floataa`) — confirm against the
    consumers of this dict.
    """
    restype_atom14_to_atom37_list = []  # per restype: atom14 slot -> atom37 index
    restype_atom37_to_atom14_list = []  # per restype: atom37 slot -> atom14 index
    restype_atom14_mask_list = []       # per restype: which atom14 slots exist
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device, )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


# Backward-compatible alias: the numpy wrapper below calls this name.
make_atomaa_masks = A__
def A__(batch):
    """Numpy-in/numpy-out wrapper around `make_atomaa_masks`.

    Converts ndarray leaves of `batch` to tensors on the aatype device, builds
    the masks, and converts every result back to numpy. Parameter name is
    restored from the `batch["aatype"]` reference the mangled body retained.
    """
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['pixel_values']
def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
    """Image processor defaults: resize to 256x256, center-crop 224x224, 1/255 rescale,
    ImageNet-standard normalization.

    Fixes a SyntaxError in the mangled original (all parameters shared one
    name) and stores the settings on `self`, which the mangled body never did.
    """
    super().__init__(**kwargs)
    size = size if size is not None else {"height": 256, "width": 256}
    size = get_size_dict(size)
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size, param_name="crop_size")
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
    """Resize `image` to `size` (a {"height", "width"} dict), delegating to the
    module-level `resize` transform.

    NOTE(review): method name follows the BaseImageProcessor API — the mangled
    original collapsed all method names into one identifier.
    """
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
    return resize(
        image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)
def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
    """Center-crop `image` to `size`, delegating to the module-level `center_crop`.

    NOTE(review): method name follows the BaseImageProcessor API.
    """
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
    return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
    """Multiply pixel values by `scale`, delegating to the module-level `rescale`.

    NOTE(review): method name follows the BaseImageProcessor API.
    """
    return rescale(image, scale=scale, data_format=data_format, **kwargs)
def normalize(
    self,
    image: np.ndarray,
    mean: Union[float, List[float]],
    std: Union[float, List[float]],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """Normalize `image` per channel: (image - mean) / std."""
    return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def preprocess(
    self,
    images: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    resample=None,
    do_center_crop: bool = None,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
) -> PIL.Image.Image:
    """Preprocess one image or a batch: resize, center-crop, rescale, normalize.

    Every argument left as `None` falls back to the corresponding attribute
    configured on the processor instance. Returns a `BatchFeature` holding
    `pixel_values` in `data_format`, optionally converted to `return_tensors`.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std

    size = size if size is not None else self.size
    size = get_size_dict(size)
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name="crop_size")

    images = make_list_of_images(images)

    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )

    # BUG FIX: the old condition parsed as `(do_resize and size is None) or
    # (resample is None)`, raising even when resizing was disabled.
    if do_resize and (size is None or resample is None):
        raise ValueError("Size and resample must be specified if do_resize is True.")

    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True.")

    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")

    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]

    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]

    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]

    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

    images = [to_channel_dimension_format(image, data_format) for image in images]

    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
| 15 | 1 |
class RadixNode:
    """A node of a radix (compressed prefix) tree storing a set of words."""

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Maps the first character of each outgoing edge label to the child node.
        self.nodes: dict[str, RadixNode] = {}
        # A node is a leaf if the tree contains the word spelled from the root
        # down to (and including) this node's prefix.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compare `word` against this node's prefix.

        Returns:
            (common prefix, remaining part of self.prefix, remaining part of word)
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of `words` into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert `word` into the subtree rooted at this node."""
        # Case 1: the word equals this node's prefix -> mark the node a leaf.
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: no outgoing edge shares a first character with the word ->
        # hang a brand-new leaf node off this node.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: the child's prefix is fully matched ->
            # recurse with the remaining part of the word.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: the prefixes diverge -> split the edge: insert an
            # intermediate node holding the common part, re-attach the old
            # child below it, then place the remaining word.
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if `word` is stored in the subtree rooted at this node."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove `word` from the tree; return False if it was absent."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree, one `-` of depth per level."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    """Self-test: insert a word list, then exercise find() and delete()."""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True
def pytests() -> None:
    """Entry point for test runners: assert the trie self-test passes."""
    assert test_trie()
def main() -> None:
    """Demo: build a radix tree from a word list and pretty-print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 15 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    """Structural type for audio filters: anything exposing per-sample `process`."""

    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]; the default emits silence."""
        return 0.0
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_lowerCamelCase : Tuple = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the magnitude response (in dB) of `filter_type`.

    Feeds a unit impulse through the filter, zero-pads to `samplerate`
    samples and plots |FFT| on a log-frequency axis up to Nyquist.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response (radians) of `filter_type`.

    Feeds a unit impulse through the filter, zero-pads to `samplerate`
    samples and plots the unwrapped FFT phase on a log-frequency axis.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
| 15 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Seed RNGs and force deterministic cudnn/cublas so the numeric image-diff
# assertions below are reproducible across runs.
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `DiTPipeline` on a tiny randomly-initialized model.

    NOTE(review): the class-attribute and method names were reconstructed from
    the bodies and the `PipelineTesterMixin` conventions; the originals were
    lost to name mangling (all attributes/methods shared one identifier, so
    only the last of each survived).
    """

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): flag name reconstructed — confirm against PipelineTesterMixin.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny transformer + VAE + scheduler for fast CPU tests."""
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic pipeline kwargs for `device`."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the released facebook/DiT-XL-2 checkpoints."""

    def tearDown(self):
        # Release GPU memory between tests so the 512 checkpoint fits.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        # Swap in the faster multistep solver before moving to GPU.
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )

            assert np.abs((expected_image - image).max()) < 1e-1
| 15 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, creating parent dirs as needed.

    Args:
        model: Torch module to export.
        model_args: Example inputs traced through the model.
        output_path: Destination `.onnx` file path.
        ordered_input_names: Input names in positional order.
        output_names: Names assigned to the graph outputs.
        dynamic_axes: Axes allowed to vary at runtime (e.g. batch).
        opset: ONNX operator-set version.
        use_external_data_format: Store weights outside the proto (pre-1.11 only).
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format`
    # arguments in v1.11, so we check the torch version for backwards compatibility.
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Export the VAE decoder of a diffusers checkpoint to ONNX.

    Args:
        model_path: Local dir or Hub id of the diffusers checkpoint.
        output_path: Directory that will receive `vae_decoder/model.onnx`.
        opset: ONNX operator-set version to target.
        fp16: Export in float16 (requires CUDA).

    Raises:
        ValueError: If `fp16` is requested without an available CUDA device.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    # BUG FIX: argparse stores "--fp16" as `args.fp16`; the previous code read a
    # non-existent `args.fpaa` attribute and crashed with AttributeError.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 15 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 15 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def builtin_voltage(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
    temperature: float = 300.0,
) -> float:
    """Return the built-in voltage (V) of a p-n junction.

    V_bi = (kT/q) * ln(Nd * Na / ni^2)

    Args:
        donor_conc: Donor concentration Nd (> ni).
        acceptor_conc: Acceptor concentration Na (> ni).
        intrinsic_conc: Intrinsic carrier concentration ni (> 0).
        temperature: Lattice temperature in kelvin; the default matches the
            module-level T = 300 K, so existing callers are unaffected.

    Raises:
        ValueError: If any concentration is non-positive or Nd/Na <= ni.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * temperature
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 15 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a minimal (model, optimizer, scheduler, train_dl, valid_dl) tuple.

    Used by the tests below as a cheap stand-in for a real training setup.
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    """Return a scalar fingerprint of a linear model's parameters.

    Two models with different weights give different signatures with high
    probability, which the save/load tests below rely on.
    """
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    """Overwrite `model`'s parameters with freshly initialized random values.

    A new Linear of the same (in, out) shape is created (weight has shape
    (out, in), hence the transpose) and its state dict is loaded in place.
    """
    new_state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(new_state)
class AcceleratorTester(AccelerateTestCase):
    """Tests for `Accelerator` preparation, shared state, and checkpointing.

    NOTE(review): method and local names were reconstructed from the bodies and
    upstream `accelerate` test conventions — the originals were destroyed by
    name mangling (every method shared one identifier, so only the last
    definition survived, and several nested functions had duplicate parameter
    names, which is a SyntaxError).
    """

    @require_cuda
    def test_accelerator_cuda_creation(self):
        # On a CUDA machine the shared PartialState must record a CUDA device;
        # asking for cpu=True afterwards conflicts with that state.
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        # Setters on the Accelerator must propagate to the shared GradientState.
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        # Prepared objects are tracked so free_memory() can release them later.
        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        """ACCELERATE_TORCH_DEVICE must override the default device selection."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """Passing `None` through `prepare()` must be a no-op."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """`prepare()` must tag handled objects with `_is_accelerate_prepared`."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """An 8-bit model fully on GPU 0 should pass through prepare()."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """An 8-bit model split between CPU and GPU must be rejected."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """A sharded 8-bit model is rejected when distributed training is on."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """A sharded 8-bit model is fine when no distributed setup is active."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        sgd = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(sgd)
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
    """Tokenize one dataset row and record its characters-per-token ratio.

    Reads the module-level ``tokenizer``.

    Args:
        __A: dataset example with a ``"content"`` text field.

    Returns:
        dict with ``input_ids`` and ``ratio_char_token``.
    """
    example = __A
    output = {}
    # NOTE(review): the obfuscated source passed the example itself as
    # `truncation=`; the upstream codeparrot pretokenization script uses
    # truncation=False — confirm.
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
# Pretokenization driver: parse args, tokenize the dataset in parallel, push it
# to the Hub. The obfuscated original bound `parser`/`args`/`tokenizer`/`ds`
# to throwaway names and mapped an undefined `tokenize` — NameError throughout.
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    A__,  # the tokenization function defined above (originally named `tokenize`)
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
# (removed dataset-concatenation artifact "| 15 | 1 |")
from __future__ import annotations
def A__ ( nums , target ):
    """Return indices ``[i, j]`` of two entries of the ascending-sorted list
    `nums` whose sum equals `target`, or ``[]`` when no such pair exists.

    Classic two-pointer scan, O(n) time / O(1) space. The obfuscated original
    declared two parameters both named ``__A`` (a SyntaxError) while the body
    read ``nums`` and ``target``; the intended names are restored.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The obfuscation renamed the function above to `A__` but left this call
    # site as `two_pointer`, which raised NameError; alias it back.
    two_pointer = A__
    print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) checks for `IFPipeline` via the shared pipeline mixins.

    Restored from the obfuscated original: its class attributes and methods all
    collapsed onto single names and shadowed one another, `get_dummy_inputs`
    declared two parameters with the same name (SyntaxError), and the base
    classes were undefined placeholders.
    """

    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Loose tolerance: fp16 save/load is slightly non-deterministic.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


# Backward-compatible alias for the obfuscated module-level class name.
__snake_case = IFPipelineFastTests
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    """GPU integration tests for the DeepFloyd IF pipeline family.

    Restored from the obfuscated original, which bound every pipeline/output to
    a throwaway local while reading `pipe_1`/`pipe_2`/`output` (NameError) and
    referenced the non-existent `torch.floataa` dtype.
    """

    def tearDown(self):
        # Clean up the VRAM after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImgaImgPipeline(**pipe_1.components)
        pipe_2 = IFImgaImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_imgaimg(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_imgaimg(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


# Backward-compatible alias for the obfuscated module-level class name.
__snake_case = IFPipelineSlowTests
def A__ ( ):
    """Reset CUDA memory accounting so the following max-memory assertions
    measure a fresh window."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


# The test class above calls this helper by its original name; the obfuscation
# renamed the def to `A__` but not the call sites, so restore an alias.
_start_torch_memory_measurement = A__
# (removed dataset-concatenation artifact "| 15 | 1 |")
import copy
import re
class TrialShortNamer:
    """Build short, reversible names for hyper-parameter trials.

    A trial name is ``PREFIX`` plus one ``<short-key><value>`` segment per
    parameter that differs from ``DEFAULTS``. Restored from the obfuscated
    original, which never wrote into ``cls``/``info`` (every assignment went to
    a throwaway local) and whose methods all shared one colliding name.
    """

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Configure the prefix and the default parameter values, then build the naming tables."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and register) the shortest unused prefix of `word`."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: every prefix is taken, so disambiguate with a
            # letter-encoded counter suffix.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Return a short name for `param_name`, built from per-word prefixes."""
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register `param_name` in both forward and reverse lookup tables."""
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Build the short-name tables once for all keys in ``DEFAULTS``."""
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode `params` into a trial name, omitting values equal to their default."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a trial name back into a full parameter dict (defaults filled in)."""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters


# Backward-compatible alias for the obfuscated module-level class name.
__snake_case = TrialShortNamer
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# The obfuscated original rebound one throwaway name for both objects,
# clobbering the logger; give each its conventional name.
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin Transformer model.

    Restored from the obfuscated original: `__init__` declared every parameter
    with the same name (SyntaxError), none of the values were assigned to
    ``self``, the two class attributes collided, and the base classes were
    undefined placeholders.
    """

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.0_2,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


# Backward-compatible alias for the obfuscated module-level class name.
__snake_case = SwinConfig
class SwinOnnxConfig(OnnxConfig):
    """ONNX export configuration for Swin.

    Restored from the obfuscated original, whose two properties shared one
    colliding name and whose base class was an undefined placeholder;
    property names follow the `OnnxConfig` interface.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # NCHW pixel input with fully dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
# (removed dataset-concatenation artifact "| 15 | 1 |")
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    """Tests for `CLIPProcessor` against its tokenizer and image processor parts.

    Restored from the obfuscated original, which never stored fixture paths on
    ``self`` (every assignment went to a throwaway local, so ``self.tmpdirname``
    etc. were undefined) and whose methods all shared one colliding name.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random 400x30 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)


# Backward-compatible alias for the obfuscated module-level class name.
__snake_case = CLIPProcessorTest
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''Unit tests for ``EulerDiscreteScheduler`` (diffusers).

NOTE(review): identifiers in this block look machine-renamed — every method
shares the name ``_SCREAMING_SNAKE_CASE`` (so only the last definition would
survive on the class), both class attributes are named ``_snake_case``, and
assignments target ``_lowerCamelCase`` while later statements read the
original names (``config``, ``scheduler``, ``sample`` ...). Restore the
original names from upstream before running.
'''
_snake_case = (EulerDiscreteScheduler,)
_snake_case = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""Return the baseline scheduler config with keyword overrides applied."""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""Sweep several ``num_train_timesteps`` values through ``check_over_configs``."""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""Sweep matched (beta_start, beta_end) pairs."""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""Sweep the supported beta schedules."""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""Sweep the supported prediction types."""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""Run the full denoising loop (default epsilon prediction) and pin the
output's absolute sum and mean against reference values."""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""Same denoising loop under ``v_prediction``; reference values differ."""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : Any = torch.manual_seed(0)
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""Denoising loop with ``set_timesteps(..., device=...)`` to exercise
device placement of the sigma schedule."""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""Denoising loop with Karras sigmas enabled; reference values differ."""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 15 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def A__ ( __A , __A , __A , __A , __A , __A = None , ):
'''Load CSV splits and convert them to ``tf.data`` datasets.

Builds train/validation/test datasets for whichever files are provided,
tokenizing one or two text columns to fixed-length ``max_length`` inputs,
and returns ``(train_ds, val_ds, test_ds, label2id)``.

NOTE(review): the signature repeats ``__A`` for every parameter (a
SyntaxError in Python) and the body reads names that are never assigned
(``train_file``, ``files``, ``ds``, ``tokenizer``, ``transformed_ds`` ...)
— the original parameter/local names were machine-renamed away. Restore
them from the upstream script before use.
'''
_lowerCamelCase : Tuple = {}
if train_file is not None:
_lowerCamelCase : Union[str, Any] = [train_file]
if eval_file is not None:
_lowerCamelCase : Dict = [eval_file]
if test_file is not None:
_lowerCamelCase : Dict = [test_file]
# Load all provided CSVs as one DatasetDict, then derive feature/label names.
_lowerCamelCase : Any = datasets.load_dataset("""csv""" , data_files=__A )
_lowerCamelCase : Any = list(ds[list(files.keys() )[0]].features.keys() )
_lowerCamelCase : Optional[Any] = features_name.pop(__A )
_lowerCamelCase : Optional[int] = list(set(ds[list(files.keys() )[0]][label_name] ) )
_lowerCamelCase : Tuple = {label: i for i, label in enumerate(__A )}
_lowerCamelCase : Optional[Any] = tokenizer.model_input_names
_lowerCamelCase : Dict = {}
# Tokenize: single text column vs. a sentence-pair task.
if len(__A ) == 1:
for k in files.keys():
_lowerCamelCase : Optional[Any] = ds[k].map(
lambda __A : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__A , max_length=__A , padding="""max_length""" ) , batched=__A , )
elif len(__A ) == 2:
for k in files.keys():
_lowerCamelCase : Tuple = ds[k].map(
lambda __A : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__A , max_length=__A , padding="""max_length""" , ) , batched=__A , )
# Generators yielding (features_dict, label_id) pairs per split.
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_lowerCamelCase : Optional[int] = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : Dict = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_lowerCamelCase : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : int = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_lowerCamelCase : List[str] = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : str = labelaid[ex[label_name]]
yield (d, label)
# Wrap each generator as a tf.data.Dataset with an asserted cardinality so
# TF knows the split length (needed by TFTrainer for epoch accounting).
_lowerCamelCase : int = (
tf.data.Dataset.from_generator(
__A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_lowerCamelCase : Union[str, Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_lowerCamelCase : int = (
tf.data.Dataset.from_generator(
__A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_lowerCamelCase : Tuple = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_lowerCamelCase : int = (
tf.data.Dataset.from_generator(
__A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_lowerCamelCase : List[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase : List[str] =logging.getLogger(__name__)
@dataclass
class __snake_case :
'''Command-line arguments describing the dataset: CSV file paths, the label
column, sequence length, and cache behaviour. Per-field meaning is given
in each ``metadata['help']`` string.

NOTE(review): every field is named ``_snake_case`` — later declarations
shadow earlier ones, so only the last field survives on the dataclass;
the original field names were machine-renamed away.
'''
_snake_case = field(metadata={'help': 'Which column contains the label'} )
_snake_case = field(default=__lowerCAmelCase , metadata={'help': 'The path of the training file'} )
_snake_case = field(default=__lowerCAmelCase , metadata={'help': 'The path of the development file'} )
_snake_case = field(default=__lowerCAmelCase , metadata={'help': 'The path of the test file'} )
_snake_case = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_snake_case = field(
default=__lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class __snake_case :
'''Command-line arguments selecting the pretrained model/config/tokenizer
and the download cache. Per-field meaning is given in each
``metadata['help']`` string.

NOTE(review): all fields share the name ``_snake_case`` (machine-renaming
damage) — only the last declaration survives; restore original names.
'''
_snake_case = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_snake_case = field(
default=__lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_snake_case = field(
default=__lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_snake_case = field(default=__lowerCAmelCase , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_snake_case = field(
default=__lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def A__ ( ):
'''Entry point: parse arguments, build TF datasets, fine-tune a sequence
classification model with ``TFTrainer``, and optionally evaluate.

Returns the (possibly empty) dict of evaluation results.

NOTE(review): assignments throughout target ``_lowerCamelCase`` while later
statements read the original names (``model_args``, ``training_args``,
``tokenizer``, ``trainer``, ``results`` ...) — machine-renaming damage;
restore the names from the upstream ``run_tf_text_classification.py``.
'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = parser.parse_args_into_dataclasses()
# Refuse to clobber a non-empty output directory unless explicitly allowed.
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__A , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__A ) , labelaid=__A , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
# Instantiate the model under the distribution strategy scope.
with training_args.strategy.scope():
_lowerCamelCase : int = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , )
# Accuracy over argmax predictions.
def compute_metrics(__A ) -> Dict:
_lowerCamelCase : str = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_lowerCamelCase : Any = TFTrainer(
model=__A , args=__A , train_dataset=__A , eval_dataset=__A , compute_metrics=__A , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase : Any = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCamelCase : List[str] = trainer.evaluate()
_lowerCamelCase : int = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(__A , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(__A )
return results
# Script entry point.
if __name__ == "__main__":
main()
| 15 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ={
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( __lowerCAmelCase ):
'''Character-level tokenizer for MGP-STR: each character of the input is a
token, and the vocabulary is a flat char->id JSON mapping.

NOTE(review): all methods share the name ``_SCREAMING_SNAKE_CASE`` (only
the last definition survives on the class), and ``__init__`` stores the
loaded vocab into ``_lowerCamelCase`` while later methods read
``self.vocab`` / ``self.decoder`` — machine-renaming damage; restore the
original attribute/method names from upstream before use.
'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]:
"""Load the char->id vocab from the given JSON file and build the
reverse id->char ``decoder`` mapping."""
super().__init__(
unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
"""Vocabulary size (number of character entries)."""
return len(self.vocab)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""Return the full vocab merged with any added tokens."""
return dict(self.vocab , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any:
"""Tokenize by splitting the text into individual characters."""
_lowerCamelCase : Tuple = []
for s in text:
char_tokens.extend(_UpperCamelCase)
return char_tokens
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]:
"""Map a character token to its id, falling back to the unk id."""
return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict:
"""Map an id back to its character token."""
return self.decoder.get(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""Write the vocab as JSON into ``save_directory`` and return the path.
Logs an error (and returns None) if the target is not a directory."""
if not os.path.isdir(_UpperCamelCase):
logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase))
return
_lowerCamelCase : Tuple = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
return (vocab_file,)
| 15 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase : int ={
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase : Any ={
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
lowerCAmelCase : Optional[Any] ={
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class __snake_case ( __lowerCAmelCase ):
'''Fast (tokenizers-backed) DistilBERT tokenizer.

NOTE(review): all methods share the name ``_SCREAMING_SNAKE_CASE`` (only
the last definition survives) and locals were machine-renamed; restore
from upstream ``DistilBertTokenizerFast`` before relying on behaviour.
'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = ['input_ids', 'attention_mask']
_snake_case = DistilBertTokenizer
def __init__( self : Tuple , _UpperCamelCase : int=None , _UpperCamelCase : int=None , _UpperCamelCase : Any=True , _UpperCamelCase : Optional[Any]="[UNK]" , _UpperCamelCase : int="[SEP]" , _UpperCamelCase : Optional[int]="[PAD]" , _UpperCamelCase : List[Any]="[CLS]" , _UpperCamelCase : int="[MASK]" , _UpperCamelCase : Tuple=True , _UpperCamelCase : Optional[Any]=None , **_UpperCamelCase : str , ) ->str:
"""Initialise the fast tokenizer, then rebuild the backend normalizer if
the requested lowercase/strip-accents/Chinese-chars settings differ
from the ones recorded in the loaded tokenizer.json."""
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , _UpperCamelCase) != do_lower_case
or normalizer_state.get("""strip_accents""" , _UpperCamelCase) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _UpperCamelCase) != tokenize_chinese_chars
):
_lowerCamelCase : Dict = getattr(_UpperCamelCase , normalizer_state.pop("""type"""))
_lowerCamelCase : int = do_lower_case
_lowerCamelCase : Optional[Any] = strip_accents
_lowerCamelCase : Dict = tokenize_chinese_chars
_lowerCamelCase : Optional[int] = normalizer_class(**_UpperCamelCase)
_lowerCamelCase : List[str] = do_lower_case
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : str=None) ->str:
"""Build ``[CLS] A [SEP]`` (or ``[CLS] A [SEP] B [SEP]``) input ids.
NOTE(review): both the first and second segment read ``token_ids_a`` —
upstream appends ``token_ids_1`` for the pair; the two parameter names
appear to have been collapsed by machine renaming. Verify upstream."""
_lowerCamelCase : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""Return token-type ids: 0 for the first segment (incl. CLS/SEP),
1 for the optional second segment."""
_lowerCamelCase : Union[str, Any] = [self.sep_token_id]
_lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""Delegate vocabulary saving to the backend tokenizer model."""
_lowerCamelCase : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase)
return tuple(_UpperCamelCase)
| 15 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''Tests for ``transformers.utils.backbone_utils``.

NOTE(review): all test methods share the name ``_SCREAMING_SNAKE_CASE``
(only the last survives under unittest discovery) and many call sites
pass ``_UpperCamelCase``, which is never assigned — machine-renaming
damage; restore from the upstream test file.
'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
"""``get_aligned_output_features_output_indices``: defaults to the last
stage, aligns features<->indices, and accepts negative indices."""
_lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""c"""])
self.assertEqual(_UpperCamelCase , [2])
# Out indices set to match out features
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features set to match out indices
_lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features selected from negative indices
_lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [-3, -1])
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""``verify_out_features_out_indices`` rejects every malformed shape of
input and accepts a valid combination."""
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
# Out features must be a list
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
# Out features must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
# Out indices must be a list or tuple
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
# Out features and out indices must be the same length
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
# Out features should match out indices
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
# Out features and out indices should be in order
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""``BackboneMixin`` keeps out_features and out_indices in sync when
either side is reassigned.
NOTE(review): the assignments below target ``_lowerCamelCase`` — they
were presumably ``backbone.stage_names`` / ``backbone.out_features`` /
``backbone.out_indices`` upstream; verify before use."""
_lowerCamelCase : int = BackboneMixin()
_lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
_lowerCamelCase : Tuple = ["""a""", """c"""]
_lowerCamelCase : List[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
_lowerCamelCase : str = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""])
self.assertEqual(backbone.out_indices , [0, 1])
_lowerCamelCase : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 1 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase : List[Any] =logging.get_logger(__name__)
lowerCAmelCase : Dict[Optional[str], Type[Formatter]] ={}
lowerCAmelCase : Dict[Optional[str], str] ={}
lowerCAmelCase : Dict[Optional[str], Exception] ={}
def A__ ( __A , __A , __A = None , ):
'''Register a ``Formatter`` class under a format type plus optional
aliases, warning when an existing registration is overwritten.

NOTE(review): the signature repeats ``__A`` (a SyntaxError) and the body
reads unassigned names (``aliases``, ``format_type``, ``formatter_cls``)
— machine-renaming damage; restore from upstream ``_register_formatter``.
'''
_lowerCamelCase : List[str] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
_lowerCamelCase : Any = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
_lowerCamelCase : Union[str, Any] = format_type
def A__ ( __A , __A , __A = None ):
'''Record, for a format type and its aliases, the exception to raise when
the backend (torch/tf/jax) is not installed.

NOTE(review): duplicate ``__A`` parameters (SyntaxError) and reads of the
unassigned names ``aliases``/``format_type``/``unavailable_error`` —
machine-renaming damage; restore from upstream.
'''
_lowerCamelCase : int = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_lowerCamelCase : Optional[Any] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
# NOTE(review): these calls reference ``_register_formatter`` /
# ``_register_unavailable_formatter``, but both functions above were renamed
# to ``A__`` — the registrations cannot resolve as written; restore names.
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
# Backend-dependent formatters: register the real class when the backend is
# importable, otherwise record the error to raise on first use.
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
lowerCAmelCase : Any =ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
lowerCAmelCase : Dict =ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
lowerCAmelCase : Union[str, Any] =ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def A__ ( __A ):
    '''Resolve a format-type alias (e.g. ``"np"``, ``"pt"``) to its canonical
    registered name via the module-level ``_FORMAT_TYPES_ALIASES`` table.

    Unknown values (including ``None``) are returned unchanged.

    Bug fix: the original body read the undefined name ``format_type``
    instead of the parameter ``__A`` (machine-renaming damage), which made
    every call raise ``NameError``.
    '''
    if __A in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[__A]
    else:
        return __A
def A__ ( __A , **__A ):
'''Instantiate the ``Formatter`` registered for a format type (after alias
resolution). Raises the recorded import-time error for a known but
unavailable backend, or ``ValueError`` for an unknown type.

NOTE(review): duplicate ``__A`` parameters (SyntaxError), the alias
resolver is called under its original name ``get_format_type_from_alias``
(renamed to ``A__`` above), and ``format_type`` is read but never
assigned — machine-renaming damage; restore from upstream.
'''
_lowerCamelCase : Any = get_format_type_from_alias(__A )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**__A )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
| 15 | import math
def A__ ( __A ):
    '''Return True iff ``__A`` is a prime number.

    Trial division by odd candidates up to sqrt(n); 2 and 3 are handled by
    the fast path, while n < 2 and all even n are rejected immediately.

    Raises AssertionError when ``__A`` is not a non-negative int.

    Bug fixes: the original asserted ``isinstance(__A , __A)`` (value checked
    against itself instead of ``int`` — a guaranteed TypeError) and read the
    undefined names ``number`` / ``odd_numbers`` left over from machine
    renaming.
    '''
    assert isinstance(__A, int) and (
        __A >= 0
    ), "'number' must been an int and positive"
    if 1 < __A < 4:
        # 2 and 3 are primes
        return True
    elif __A < 2 or not __A % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    # Only odd divisors up to sqrt(n) need to be checked.
    odd_candidates = range(3, int(math.sqrt(__A) + 1), 2)
    return not any(not __A % i for i in odd_candidates)
def A__ ( __A , factor=1 , **kwargs ):
    '''Return the nearest prime to ``factor * __A``, excluding the start value.

    Searches upward by default; pass ``desc=True`` to search downward. When
    the start value is itself prime, the search restarts one past it so the
    result is always a *different* prime than the starting point.

    Bug fixes: the original signature repeated ``__A`` for every parameter
    (a SyntaxError) and the body read the undefined names ``value`` /
    ``first_value_val`` / ``is_prime`` / ``next_prime`` — machine-renaming
    damage. A private trial-division helper makes the block self-contained.

    NOTE(review): as upstream, ``desc=True`` from a start value below the
    smallest prime never terminates — no guard is added to preserve behaviour.
    '''
    def _is_prime(n):
        # Trial division mirroring the module-level primality check.
        if 1 < n < 4:
            return True
        if n < 2 or n % 2 == 0:
            return False
        return all(n % i for i in range(3, int(math.sqrt(n) + 1), 2))

    value = factor * __A
    first_value_val = value
    while not _is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        # Start value was already prime: restart just past it.
        return A__(value + 1, **kwargs)
    return value
| 15 | 1 |
def A__ ( __A ):
    '''Sort ``__A`` with a top-down merge sort and return the sorted list.

    The sort is stable (ties keep the left run's element via ``<=``); a list
    of length <= 1 is returned as-is.

    Bug fixes: the original nested ``merge`` declared duplicate ``__A``
    parameters (a SyntaxError) and the body read the undefined names
    ``left`` / ``right`` / ``collection`` / ``merge_sort`` — machine-renaming
    damage restored here (recursion goes through ``A__``).
    '''
    def merge(left, right) -> list:
        # Drain both runs, always popping the smaller head element.
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(__A) <= 1:
        return __A
    # Recurse on slices (copies), so merge()'s pop(0) never mutates the input.
    mid = len(__A) // 2
    return merge(A__(__A[:mid]), A__(__A[mid:]))
# Interactive entry point: run doctests, then sort comma-separated input.
# NOTE(review): ``merge_sort`` / ``user_input`` / ``unsorted`` are read below
# but never bound (the function above is named ``A__`` and the module-level
# assignments target ``lowerCAmelCase``) — machine-renaming damage.
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : Dict =input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase : Dict =[int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 15 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple:
"""simple docstring"""
super().__init__(*_UpperCamelCase , **_UpperCamelCase)
requires_backends(self , """vision""")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {}
if top_k is not None:
_lowerCamelCase : str = top_k
return {}, {}, postprocess_params
def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict:
"""simple docstring"""
return super().__call__(_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Tuple = load_image(_UpperCamelCase)
_lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Any = self.model(**_UpperCamelCase)
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str:
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowerCamelCase : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
_lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0]
_lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase)
elif self.framework == "tf":
_lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0]
_lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase)
_lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
_lowerCamelCase : str = scores.tolist()
_lowerCamelCase : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
| 15 | 1 |
def A__ ( __A ):
'''simple docstring'''
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
_lowerCamelCase : str = sorted(string.lower() )
return len(__A ) == len(set(__A ) )
if __name__ == "__main__":
lowerCAmelCase : Tuple =input("Enter a string ").strip()
lowerCAmelCase : Optional[Any] =is_isogram(input_str)
print(F"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 15 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
lowerCAmelCase : Optional[int] =[
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def A__ ( __A , __A , __A , __A , __A ):
'''simple docstring'''
for attribute in key.split(""".""" ):
_lowerCamelCase : Dict = getattr(__A , __A )
if weight_type is not None:
_lowerCamelCase : Optional[Any] = getattr(__A , __A ).shape
else:
_lowerCamelCase : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase : int = value
elif weight_type == "weight_g":
_lowerCamelCase : List[Any] = value
elif weight_type == "weight_v":
_lowerCamelCase : Dict = value
elif weight_type == "bias":
_lowerCamelCase : Dict = value
else:
_lowerCamelCase : int = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : List[str] = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : Any = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == """group""" , )
_lowerCamelCase : Any = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : str = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
_lowerCamelCase : Optional[int] = True
if "*" in mapped_key:
_lowerCamelCase : Dict = name.split(__A )[0].split(""".""" )[-2]
_lowerCamelCase : List[str] = mapped_key.replace("""*""" , __A )
if "weight_g" in name:
_lowerCamelCase : Optional[Any] = """weight_g"""
elif "weight_v" in name:
_lowerCamelCase : List[Any] = """weight_v"""
elif "bias" in name:
_lowerCamelCase : List[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Optional[Any] = """weight"""
else:
_lowerCamelCase : str = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(F"""Unused weights: {unused_weights}""" )
def A__ ( __A , __A , __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Any = full_name.split("""conv_layers.""" )[-1]
_lowerCamelCase : Dict = name.split(""".""" )
_lowerCamelCase : int = int(items[0] )
_lowerCamelCase : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : Union[str, Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
@torch.no_grad()
def A__ ( __A , __A , __A=None , __A=None , __A=True ):
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : int = UniSpeechSatConfig.from_pretrained(__A )
else:
_lowerCamelCase : int = UniSpeechSatConfig()
_lowerCamelCase : Tuple = """"""
if is_finetuned:
_lowerCamelCase : Dict = UniSpeechSatForCTC(__A )
else:
_lowerCamelCase : Union[str, Any] = UniSpeechSatForPreTraining(__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
_lowerCamelCase : List[Any] = model[0].eval()
recursively_load_weights(__A , __A )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCAmelCase : Any =parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 15 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__A , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__A )
return parser.parse_args()
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = parse_args()
# Import training_script as a module.
_lowerCamelCase : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCamelCase : Optional[Any] = script_fpath.stem
_lowerCamelCase : Dict = importlib.import_module(__A )
# Patch sys.argv
_lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 15 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'decision_transformer'
_snake_case = ['past_key_values']
_snake_case = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Dict , _UpperCamelCase : int=17 , _UpperCamelCase : Optional[int]=4 , _UpperCamelCase : Optional[Any]=128 , _UpperCamelCase : Tuple=4096 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : List[str]=1 , _UpperCamelCase : str=1024 , _UpperCamelCase : str=3 , _UpperCamelCase : str=1 , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Dict="relu" , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Union[str, Any]=1E-5 , _UpperCamelCase : Union[str, Any]=0.0_2 , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Tuple=5_0256 , _UpperCamelCase : Optional[Any]=5_0256 , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Union[str, Any]=False , **_UpperCamelCase : Union[str, Any] , ) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = state_dim
_lowerCamelCase : str = act_dim
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Any = max_ep_len
_lowerCamelCase : Union[str, Any] = action_tanh
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : Dict = n_positions
_lowerCamelCase : Dict = n_layer
_lowerCamelCase : str = n_head
_lowerCamelCase : Union[str, Any] = n_inner
_lowerCamelCase : Any = activation_function
_lowerCamelCase : int = resid_pdrop
_lowerCamelCase : int = embd_pdrop
_lowerCamelCase : Dict = attn_pdrop
_lowerCamelCase : List[str] = layer_norm_epsilon
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Tuple = scale_attn_weights
_lowerCamelCase : str = use_cache
_lowerCamelCase : str = scale_attn_by_inverse_layer_idx
_lowerCamelCase : int = reorder_and_upcast_attn
_lowerCamelCase : Optional[int] = bos_token_id
_lowerCamelCase : Dict = eos_token_id
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase)
| 15 | def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
if n == 0:
return 0
_lowerCamelCase : Tuple = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max(
__A , prices[i - 1] + naive_cut_rod_recursive(n - i , __A ) )
return max_revue
def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
_lowerCamelCase : Optional[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__A , __A , __A )
def A__ ( __A , __A , __A ):
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_lowerCamelCase : int = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Optional[Any] = max(
__A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __A , __A ) , )
_lowerCamelCase : Optional[Any] = max_revenue
return max_rev[n]
def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
_lowerCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
_lowerCamelCase : Any = 0
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max_rev[i]
for j in range(1 , i + 1 ):
_lowerCamelCase : List[Any] = max(__A , prices[j - 1] + max_rev[i - j] )
_lowerCamelCase : int = max_revenue_i
return max_rev[n]
def A__ ( __A , __A ):
'''simple docstring'''
if n < 0:
_lowerCamelCase : Any = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(__A )
if n > len(__A ):
_lowerCamelCase : List[Any] = (
"""Each integral piece of rod must have a corresponding price. """
F"""Got n = {n} but length of prices = {len(__A )}"""
)
raise ValueError(__A )
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : str = [6, 10, 12, 15, 20, 23]
_lowerCamelCase : List[str] = len(__A )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_lowerCamelCase : Tuple = 36
_lowerCamelCase : Any = top_down_cut_rod(__A , __A )
_lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A )
_lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 15 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
_lowerCamelCase : int = """ylacombe/bark-small"""
_lowerCamelCase : Tuple = tempfile.mkdtemp()
_lowerCamelCase : Tuple = """en_speaker_1"""
_lowerCamelCase : Tuple = """This is a test string"""
_lowerCamelCase : Optional[int] = """speaker_embeddings_path.json"""
_lowerCamelCase : int = """speaker_embeddings"""
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : int) ->List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : int = BarkProcessor(tokenizer=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCamelCase : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCamelCase : int = 35
_lowerCamelCase : Optional[int] = 2
_lowerCamelCase : Optional[Any] = 8
_lowerCamelCase : str = {
"""semantic_prompt""": np.ones(_UpperCamelCase),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len)),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
_lowerCamelCase : List[str] = processor(text=self.input_string , voice_preset=_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCamelCase , np.array([])).tolist())
# test loading voice preset from npz file
_lowerCamelCase : int = os.path.join(self.tmpdirname , """file.npz""")
np.savez(_UpperCamelCase , **_UpperCamelCase)
_lowerCamelCase : str = processor(text=self.input_string , voice_preset=_UpperCamelCase)
_lowerCamelCase : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCamelCase , np.array([])).tolist())
# test loading voice preset from the hub
_lowerCamelCase : List[str] = processor(text=self.input_string , voice_preset=self.voice_preset)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = BarkProcessor(tokenizer=_UpperCamelCase)
_lowerCamelCase : str = processor(text=self.input_string)
_lowerCamelCase : Union[str, Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 15 | from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Any = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 1 |
def A__ ( inp , table ):
    '''Permute/select characters of ``inp`` according to ``table``.

    ``table`` holds 1-based positions into ``inp``; the result is the
    character of ``inp`` at each listed position, in table order (this is
    the generic permutation primitive of S-DES: P10, P8, P4, IP, IP^-1).

    Fix: the obfuscated signature declared the same parameter ``__A``
    twice (a SyntaxError) while the body referenced ``inp``/``table``;
    the quadratic ``+=`` build is replaced with ``str.join``.
    '''
    return "".join(inp[i - 1] for i in table)
def A__ ( data ):
    '''Rotate ``data`` one position to the left (S-DES key-schedule shift).

    Fix: the parameter was obfuscated to ``__A`` while the body used the
    undefined name ``data``. Using ``data[:1]`` instead of ``data[0]``
    also makes the empty string return ``""`` instead of raising
    IndexError; behavior for non-empty input is unchanged.
    '''
    return data[1:] + data[:1]
def A__ ( a , b ):
    '''Bitwise XOR of two equal-length bit strings.

    Returns a string with "0" where the corresponding characters of
    ``a`` and ``b`` are equal and "1" where they differ.

    Fix: the obfuscated signature declared ``__A`` twice (a SyntaxError)
    while the body referenced ``a``/``b``; the quadratic ``+=`` loop is
    replaced with a single ``str.join`` pass.
    '''
    return "".join("0" if x == y else "1" for x, y in zip(a, b))
def A__ ( s , data ):
    '''Look up a 4-bit half-block ``data`` in the 4x4 S-box ``s``.

    Row index is the 2-bit number formed by the outer bits (first and
    last characters of ``data``); column index is the 2-bit number formed
    by the middle bits. Returns the selected entry as a binary string
    without the ``0b`` prefix (one or two characters).

    Fix: the obfuscated signature declared ``__A`` twice (a SyntaxError)
    while the body referenced ``data``/``s``.
    '''
    row = int(data[0] + data[-1], 2)
    col = int(data[1:3], 2)
    return bin(s[row][col])[2:]
def A__ ( __A , __A , __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Dict = message[:4]
_lowerCamelCase : Any = message[4:]
_lowerCamelCase : int = apply_table(__A , __A )
_lowerCamelCase : Dict = xor(__A , __A )
_lowerCamelCase : Union[str, Any] = apply_sbox(__A , temp[:4] ) # noqa: E741
_lowerCamelCase : int = apply_sbox(__A , temp[4:] )
_lowerCamelCase : int = """0""" * (2 - len(__A )) + l # noqa: E741
_lowerCamelCase : List[Any] = """0""" * (2 - len(__A )) + r
_lowerCamelCase : str = apply_table(l + r , __A )
_lowerCamelCase : Optional[int] = xor(__A , __A )
return temp + right
if __name__ == "__main__":
lowerCAmelCase : Any =input("Enter 10 bit key: ")
lowerCAmelCase : Any =input("Enter 8 bit message: ")
lowerCAmelCase : Any =[6, 3, 7, 4, 8, 5, 10, 9]
lowerCAmelCase : str =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowerCAmelCase : Union[str, Any] =[2, 4, 3, 1]
lowerCAmelCase : int =[2, 6, 3, 1, 4, 8, 5, 7]
lowerCAmelCase : Dict =[4, 1, 3, 5, 7, 2, 8, 6]
lowerCAmelCase : int =[4, 1, 2, 3, 2, 3, 4, 1]
lowerCAmelCase : Optional[int] =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCAmelCase : Union[str, Any] =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCAmelCase : Optional[int] =apply_table(key, paa_table)
lowerCAmelCase : List[Any] =temp[:5]
lowerCAmelCase : str =temp[5:]
lowerCAmelCase : Union[str, Any] =left_shift(left)
lowerCAmelCase : str =left_shift(right)
lowerCAmelCase : List[Any] =apply_table(left + right, pa_table)
lowerCAmelCase : List[str] =left_shift(left)
lowerCAmelCase : Optional[int] =left_shift(right)
lowerCAmelCase : Optional[int] =left_shift(left)
lowerCAmelCase : Any =left_shift(right)
lowerCAmelCase : str =apply_table(left + right, pa_table)
# encryption
lowerCAmelCase : Union[str, Any] =apply_table(message, IP)
lowerCAmelCase : Optional[int] =function(expansion, sa, sa, keya, temp)
lowerCAmelCase : int =temp[4:] + temp[:4]
lowerCAmelCase : Any =function(expansion, sa, sa, keya, temp)
lowerCAmelCase : Dict =apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
lowerCAmelCase : Any =apply_table(CT, IP)
lowerCAmelCase : List[Any] =function(expansion, sa, sa, keya, temp)
lowerCAmelCase : Any =temp[4:] + temp[:4]
lowerCAmelCase : Union[str, Any] =function(expansion, sa, sa, keya, temp)
lowerCAmelCase : Tuple =apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 15 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
_UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
_lowerCamelCase : Any = Text(
cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
if self.streaming:
_lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
self.builder.download_and_prepare(
download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
_lowerCamelCase : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
return dataset
| 15 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = BlenderbotSmallTokenizer
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
super().setUp()
_lowerCamelCase : Union[str, Any] = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
_lowerCamelCase : List[Any] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : Optional[Any] = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
_lowerCamelCase : List[str] = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
_lowerCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(_UpperCamelCase))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **_UpperCamelCase : int) ->Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Any = """adapt act apte"""
_lowerCamelCase : Any = """adapt act apte"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
_lowerCamelCase : Any = """adapt act apte"""
_lowerCamelCase : List[Any] = ["""adapt""", """act""", """ap@@""", """te"""]
_lowerCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCamelCase)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_lowerCamelCase : List[str] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase) , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""")
assert tok("""sam""").input_ids == [1384]
_lowerCamelCase : int = """I am a small frog."""
_lowerCamelCase : Tuple = tok([src_text] , padding=_UpperCamelCase , truncation=_UpperCamelCase)["""input_ids"""]
_lowerCamelCase : List[Any] = tok.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase)[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""")
_lowerCamelCase : str = """I am a small frog ."""
_lowerCamelCase : str = """."""
_lowerCamelCase : Any = tok(_UpperCamelCase)["""input_ids"""]
_lowerCamelCase : Tuple = tok(_UpperCamelCase)["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
| 15 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A ):
'''simple docstring'''
if not sequence:
return []
if len(__A ) == 1:
return list(__A )
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = len(__A ) - 1
_lowerCamelCase : str = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid]
high -= 1
else:
_lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(__A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase : Optional[int] =["gpt2"]
lowerCAmelCase : Optional[int] ="gpt2"
if is_tf_available():
class __snake_case ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : List[str]) ->Optional[Any]:
"""simple docstring"""
super().__init__()
_lowerCamelCase : int = tokenizer
_lowerCamelCase : Any = AutoConfig.from_pretrained(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = TFGPTaLMHeadModel.from_config(_UpperCamelCase)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Tuple) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.tokenizer(_UpperCamelCase)
_lowerCamelCase : Dict = tokenized["""input_ids"""].to_tensor()
_lowerCamelCase : int = tf.cast(input_ids_dense > 0 , tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_lowerCamelCase : Union[str, Any] = self.model(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase)["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
super().setUp()
_lowerCamelCase : List[str] = [GPTaTokenizer.from_pretrained(_UpperCamelCase) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_lowerCamelCase : Optional[int] = [TFGPTaTokenizer.from_pretrained(_UpperCamelCase) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_lowerCamelCase : int = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
_lowerCamelCase : List[Any] = list(zip(self.test_sentences , self.test_sentences[::-1]))
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in self.test_sentences:
_lowerCamelCase : List[str] = tokenizer([test_inputs] , return_tensors="""tf""")
_lowerCamelCase : Tuple = tf_tokenizer([test_inputs])
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_lowerCamelCase : Any = python_outputs[key].numpy()
_lowerCamelCase : str = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
self.assertTrue(tf.reduce_all(tf.cast(_UpperCamelCase , tf.intaa) == tf_outputs_values))
@slow
def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Dict = tf.function(_UpperCamelCase)
for test_inputs in self.test_sentences:
_lowerCamelCase : List[Any] = tf.constant(_UpperCamelCase)
_lowerCamelCase : str = compiled_tokenizer(_UpperCamelCase)
_lowerCamelCase : Any = tf_tokenizer(_UpperCamelCase)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Optional[Any] = ModelToSave(tokenizer=_UpperCamelCase)
_lowerCamelCase : Any = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCamelCase : Any = model.serving(_UpperCamelCase) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCamelCase : Dict = Path(_UpperCamelCase) / """saved.model"""
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={"""serving_default""": model.serving})
_lowerCamelCase : Union[str, Any] = tf.saved_model.load(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = loaded_model.signatures["""serving_default"""](_UpperCamelCase)["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Dict = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCamelCase : Optional[Any] = tf_tokenizer(_UpperCamelCase) # Build model with some sample inputs
_lowerCamelCase : List[Any] = tf_tokenizer.get_config()
_lowerCamelCase : Union[str, Any] = TFGPTaTokenizer.from_config(_UpperCamelCase)
_lowerCamelCase : List[Any] = model_from_config(_UpperCamelCase)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_lowerCamelCase : Optional[Any] = 12_3123
for max_length in [3, 5, 1024]:
_lowerCamelCase : List[str] = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCamelCase : Optional[Any] = tf_tokenizer(_UpperCamelCase , max_length=_UpperCamelCase)
_lowerCamelCase : Tuple = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
| 15 | from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( __A , __A ):
'''simple docstring'''
if row >= len(__A ):
solution.append(__A )
printboard(__A )
print()
return True
for i in range(len(__A ) ):
if is_safe(__A , __A , __A ):
_lowerCamelCase : int = 1
solve(__A , row + 1 )
_lowerCamelCase : List[str] = 0
return False
def A__ ( __A ):
'''simple docstring'''
for i in range(len(__A ) ):
for j in range(len(__A ) ):
if board[i][j] == 1:
print("""Q""" , end=""" """ )
else:
print(""".""" , end=""" """ )
print()
# n=int(input("The no. of queens"))
lowerCAmelCase : int =8
lowerCAmelCase : Union[str, Any] =[[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['image_processor', 'tokenizer']
_snake_case = 'LayoutLMv3ImageProcessor'
_snake_case = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Dict , _UpperCamelCase : Tuple=None , _UpperCamelCase : Optional[Any]=None , **_UpperCamelCase : Tuple) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _UpperCamelCase , )
_lowerCamelCase : List[Any] = kwargs.pop("""feature_extractor""")
_lowerCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(_UpperCamelCase , _UpperCamelCase)
def __call__( self : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCamelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCamelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCamelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Union[bool, str, PaddingStrategy] = False , _UpperCamelCase : Union[bool, str, TruncationStrategy] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Union[str, Any] , ) ->BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""")
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""")
# first, apply the image processor
_lowerCamelCase : str = self.image_processor(images=_UpperCamelCase , return_tensors=_UpperCamelCase)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCamelCase , _UpperCamelCase):
_lowerCamelCase : List[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
_lowerCamelCase : Union[str, Any] = features["""words"""]
_lowerCamelCase : Optional[Any] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel values
_lowerCamelCase : List[Any] = features.pop("""pixel_values""")
if return_overflowing_tokens is True:
_lowerCamelCase : List[str] = self.get_overflowing_images(_UpperCamelCase , encoded_inputs["""overflow_to_sample_mapping"""])
_lowerCamelCase : int = images
return encoded_inputs
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(_UpperCamelCase) != len(_UpperCamelCase):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F""" {len(_UpperCamelCase)} and {len(_UpperCamelCase)}""")
return images_with_overflow
def _SCREAMING_SNAKE_CASE ( self : List[str] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[Any]) ->str:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *_UpperCamelCase : int , **_UpperCamelCase : Any) ->int:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _UpperCamelCase , )
return self.image_processor_class
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _UpperCamelCase , )
return self.image_processor
| 15 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ={
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A__ ( __A , __A , __A , __A=None ):
'''simple docstring'''
# Initialise PyTorch model
_lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A )
_lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
_lowerCamelCase : int = finetuning_task
_lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCamelCase : int = XLNetForSequenceClassification(__A )
elif "squad" in finetuning_task:
_lowerCamelCase : Dict = finetuning_task
_lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A )
else:
_lowerCamelCase : Any = XLNetLMHeadModel(__A )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__A , __A , __A )
# Save pytorch-model
_lowerCamelCase : Optional[Any] = os.path.join(__A , __A )
_lowerCamelCase : Any = os.path.join(__A , __A )
print(F"""Save PyTorch model to {os.path.abspath(__A )}""" )
torch.save(model.state_dict() , __A )
print(F"""Save configuration file to {os.path.abspath(__A )}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowerCAmelCase : Union[str, Any] =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 15 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
# NOTE(review): upstream this is `pytestmark = pytest.mark.integration`; bound to
# this name the mark has no effect on collection -- confirm intent.
lowerCAmelCase : List[Any] =pytest.mark.integration
@require_faiss
class __snake_case ( __lowerCAmelCase ):
    """Integration tests: attaching FAISS / Elasticsearch indexes to a Dataset."""

    # NOTE(review): every test method below shares the name _SCREAMING_SNAKE_CASE,
    # so earlier definitions are shadowed by later ones and unittest never collects
    # any of them (they do not start with "test"). Names are kept to preserve the
    # file's convention -- confirm intent.

    def _create_dummy_dataset(self) -> Dataset:
        """Return a 30-row dataset whose "filename" column is my_name-train_0..29."""
        # Renamed from _SCREAMING_SNAKE_CASE so the self._create_dummy_dataset()
        # call sites below resolve; the loop variable `x` (not an undefined name)
        # feeds str().
        dset = Dataset.from_dict(
            {"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}
        )
        return dset

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """add_faiss_index over a computed "vecs" column retrieves the largest row."""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        # Reassign: Dataset.map / add_faiss_index return new dataset objects; the
        # previous code dropped the mapped result so "vecs" never existed.
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)},
            with_indices=True,
            keep_in_memory=True,
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        # Row 29 holds the largest vector, so it maximizes the inner product.
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """add_faiss_index_from_external_arrays retrieves the largest external vector."""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """A FAISS index saved to disk can be reloaded under a new index name."""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """Querying an index after drop_index raises MissingIndex."""
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(
            MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32))
        )

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """add_elasticsearch_index works against a fully mocked Elasticsearch client."""
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            # NOTE(review): this *calls* return_value rather than assigning it, so the
            # indexing loop iterates a bare MagicMock; probably intended as
            # `mocked_bulk.return_value = [(True, None)] * 30` -- confirm upstream.
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class __snake_case ( __lowerCAmelCase ):
    """Unit tests for the FaissIndex wrapper itself."""

    # NOTE(review): the test methods share one name and shadow each other; kept
    # to match the file's convention -- confirm intent.

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """Inner-product index: add vectors, single query, batched queries."""
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query: a unit vector along axis 1 must match basis vector 1
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        # a 2-D array is rejected by the single-query API
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries: reversed identity matrix maps to indices 4..0
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        # a 1-D array is rejected by the batch API
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """string_factory selects the index type; combining it with custom_index fails."""
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """A pre-built faiss index can be wrapped directly via custom_index."""
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def _SCREAMING_SNAKE_CASE(self) -> None:
        """An index saved to a temp file and reloaded answers queries identically."""
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def A__ ( __A ):
    """Round-trip a FaissIndex through the fsspec mock filesystem fixture.

    ``__A`` is the pytest ``mockfs`` fixture (the previous body referenced an
    undefined ``mockfs`` name); its ``storage_options`` are forwarded to
    ``FaissIndex.save``/``load`` so the index travels over the mock:// protocol.
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = F"""mock://{index_name}"""
    index.save(path, storage_options=__A.storage_options)
    index = FaissIndex.load(path, storage_options=__A.storage_options)
    # Query the reloaded index: unit vector along axis 1 matches basis vector 1.
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class __snake_case ( __lowerCAmelCase ):
    """ElasticSearchIndex tests against a fully mocked Elasticsearch client.

    Fixes: the method is renamed ``test_*`` so unittest discovers it; every
    intermediate (``es_client``, ``index``, ``query``, ``scores``...) was
    previously bound to ``_lowerCamelCase`` while later lines read the real
    names (NameError), and the mocked search results were never attached to
    ``mocked_search.return_value``.
    NOTE(review): base class ``__lowerCAmelCase`` is undefined in this chunk —
    upstream this is ``unittest.TestCase``; confirm against the file header.
    """

    def test_elasticsearch(self) -> None:
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
def A__ ( __A ):
    """Return True iff every character of the string ``__A`` is distinct.

    Uses an (arbitrary-precision) integer as a bitmap indexed by the
    character's Unicode code point.

    Fixes: the loop iterated over the undefined name ``input_str`` instead of
    the parameter, called ``ord`` on the whole string instead of each char,
    and the bitmap/bit values were bound to ``_lowerCamelCase`` while the code
    read ``bitmap``/``ch_unicode``/``ch_bit_index_on`` (NameError).
    """
    bitmap = 0
    for ch in __A:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 15 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    """MGP-STR tokenizer tests: writes a tiny character-level vocab to a temp dir.

    Fixes: the four class attributes were all named ``_snake_case`` and
    overwrote each other (the mixin reads ``tokenizer_class`` etc.), ``setUp``
    had an obfuscated name so unittest never ran it, ``self.vocab_file`` was
    never assigned, and the test-method names collided / were not ``test_*``.
    Method names and skipped-test selection follow the upstream MGP-STR test
    module — confirm against transformers' test_tokenization_mgp_str.py.
    """

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self) -> None:
        """Create a minimal vocab file the tokenizer can be loaded from."""
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        """An added special token encodes to one id and is stripped on decode."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        """tokenize -> ids -> tokens -> text round-trips the input."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)
                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)
                self.assertEqual(text_a.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 15 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
    """Integration tests: pretrained XLM-R checkpoints reproduce reference hidden states.

    Fixes: both methods shared the obfuscated name ``_SCREAMING_SNAKE_CASE``
    (the second silently overwrote the first) and were not ``test_*`` so
    unittest never ran them; ``model``, ``input_ids`` and the expected tensors
    were bound to ``_lowerCamelCase`` while later lines read the real names.
    """

    @slow
    def test_xlm_roberta_base(self) -> None:
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))

    @slow
    def test_xlm_roberta_large(self) -> None:
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 15 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import machinery for the CPMAnt sub-package.
# Fixes: the import-structure dict was assigned to the throwaway name
# ``lowerCAmelCase`` while ``_LazyModule`` below reads ``_import_structure``
# (NameError at import time); the torch-only model list *overwrote* the dict
# instead of adding a "modeling_cpmant" key; and the lazy module was bound to
# a local instead of being installed into ``sys.modules``.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is installed.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 15 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class __snake_case ( BaseImageProcessor ):
    """Image processor: optional resize -> center-crop -> rescale -> normalize.

    Fixes: every multi-parameter signature reused the name ``_UpperCamelCase``
    (a SyntaxError: duplicate argument); parameter names are reconstructed
    positionally from the visible types/defaults. Also fixes the operator
    precedence bug ``do_resize and size is None or resample is None``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size`` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured pipeline to one image or a list of images.

        Per-call arguments override the instance defaults set in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # BUGFIX: parenthesized — previously `A and B or C`, which raised whenever
        # resample was None even with do_resize=False, and never checked size alone.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 15 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Token splitter and MinHash parameters used throughout this module.
# Fixes: all three were assigned to the throwaway name ``lowerCAmelCase``
# (overwriting each other) while get_tokens/get_min_hash read NON_ALPHA,
# MIN_NUM_TOKENS and NUM_PERM (NameError).
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    """Compute a MinHash of a token list, or None if too short to be meaningful.

    Fixes: renamed to ``get_min_hash`` (called under that name by
    ``_compute_min_hash``); ``num_perm`` was wrongly given the token list
    instead of NUM_PERM; the hash object was bound to ``_lowerCamelCase`` while
    the code read ``min_hash`` (NameError).
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    # Hash the *set* of tokens: duplicates don't affect the signature.
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Split source ``code`` into its set of non-empty alphanumeric tokens.

    Renamed to ``get_tokens``: ``jaccard_similarity`` calls it by this name.
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """Incremental MinHash-LSH index that groups near-duplicate files into clusters.

    Fixes: renamed to ``DuplicationIndex`` (instantiated under that name by
    ``make_duplicate_clusters``); methods renamed ``add`` / ``get_duplicate_clusters``
    / ``save`` to match their call sites (they previously all shared one
    obfuscated name and shadowed each other); ``add`` had two parameters both
    named ``_UpperCamelCase`` (SyntaxError); locals such as ``close_duplicates``
    and ``cluster`` were read without ever being bound.
    """

    def __init__(
        self, *,
        duplication_jaccard_threshold: float = 0.85,
    ) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # Maps a cluster's base key -> set of duplicate keys.
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash: MinHash) -> None:
        """Insert ``(code_key, min_hash)`` and attach it to an existing cluster if close."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No neighbour is a cluster base yet: start a new cluster.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return clusters as lists of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the clusters as JSON to ``filepath``."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Worker: turn an ``(index, row)`` pair into ``((index, repo, path), MinHash)``.

    Returns None implicitly for rows whose content is too short to hash.
    Renamed to ``_compute_min_hash`` (referenced by ``minhash_iter``); the
    unpacked ``index``/``data`` and the hash were previously bound to
    ``_lowerCamelCase`` while the body read the real names.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield ``(filename_tuple, MinHash)`` pairs, hashing rows in a process pool.

    Renamed to ``minhash_iter`` (called under that name by
    ``make_duplicate_clusters``).
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10_000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Build near-duplicate clusters for every row of ``dataset_iterator``.

    Fixes: both parameters were named ``__A`` (SyntaxError: duplicate
    argument) and the index was bound to ``_lowerCamelCase`` while the loop
    called ``di.add`` (NameError). Renamed to match its call site in
    ``deduplicate_dataset``.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    """Jaccard similarity of the token sets of two code strings.

    Fixes: both parameters were named ``__A`` (SyntaxError) and both token
    sets were bound to the same local, so the ratio was always 1.0. Renamed
    to match its call site in ``_find_cluster_extremes_shared``.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
# Module-level handle set by find_extremes() so pool workers can read the
# dataset without pickling it per task. Fixes: was assigned to the throwaway
# name ``lowerCAmelCase`` while the workers read ``_shared_dataset``.
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one cluster to its 'extremes': a minimal set of representatives.

    Each cluster element is either similar (>= ``jaccard_threshold``) to an
    already-kept extreme — whose ``copies`` count is incremented — or becomes
    a new extreme itself.

    Fixes: both parameters were named ``__A`` (SyntaxError) and both loop
    variables were collapsed into ``elementa``, so the inner comparison
    compared an element with itself.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute cluster extremes for every cluster in parallel.

    Publishes ``dataset`` through the module-global ``_shared_dataset`` so
    worker processes don't each pickle it.

    Fixes: all three parameters shared the name ``__A`` (SyntaxError) and the
    ``partial`` was never passed to ``imap_unordered``. Renamed to match its
    call site in ``deduplicate_dataset``.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Remove near-duplicate rows from ``dataset``, keeping one 'extreme' per group.

    Returns ``(filtered_dataset, duplicate_clusters)``; cluster elements are
    annotated in place with ``is_extreme`` and ``copies``.

    Fixes: both parameters were named ``__A`` (SyntaxError: duplicate
    argument), the filter lambda likewise, and every intermediate was bound to
    ``_lowerCamelCase`` while later lines (and all the summary prints) read
    the real names.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicate that is not a kept extreme.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"""Original dataset size: {len(dataset)}""")
    print(f"""Number of duplicate clusters: {len(duplicate_clusters)}""")
    print(f"""Files in duplicate cluster: {len(duplicate_indices)}""")
    print(f"""Unique files in duplicate cluster: {len(extreme_dict)}""")
    print(f"""Filtered dataset size: {len(ds_filter)}""")
    return ds_filter, duplicate_clusters
| 15 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( Protocol ):
    """Structural type for audio filters: anything with a per-sample ``process``.

    Fixes: the base was the undefined name ``__lowerCAmelCase`` — ``Protocol``
    is imported above and is the obvious intent; the method is renamed
    ``process`` because the plotting helpers below call ``filter_type.process``.
    """

    def process(self, sample: float) -> float:
        """Calculate y[n]; the protocol default is silence (0.0)."""
        return 0.0
def get_bounds(fft_results, samplerate):
    """Return (lowest, highest) dB bounds of ``fft_results`` below Nyquist,
    clamped to at least the range [-20, 20].

    Fixes: both parameters were named ``__A`` (SyntaxError: duplicate
    argument); renamed to ``get_bounds`` to match the call in the frequency
    response plot below.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type, samplerate):
    """Plot the gain (dB) of ``filter_type`` via the FFT of its impulse response.

    Fixes: both parameters were named ``__A`` (SyntaxError); intermediates
    were bound to ``_lowerCamelCase`` while later lines read them; ``np.logaa``
    is not a numpy function (``np.log10`` intended, matching the 20*log10 dB
    formula).
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("""Gain (dB)""")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type, samplerate):
    """Plot the (unwrapped) phase response of ``filter_type``.

    Fixes: both parameters were named ``__A`` (SyntaxError) and the phase
    array was bound to ``_lowerCamelCase`` while ``plt.plot`` read it.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("""Phase shift (Radians)""")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
| 15 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
# Vocabulary-file metadata for the BART tokenizer.
# Fixes: all three dicts were assigned to the throwaway name ``lowerCAmelCase``
# (each overwriting the last) while the tokenizer class attributes below read
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (NameError).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character mapping for byte-level BPE.

    Printable latin bytes map to themselves; the remaining bytes map to
    characters starting at code point 256 so the BPE vocab never contains
    whitespace or control characters.

    Fixes: renamed to ``bytes_to_unicode`` (the tokenizer's ``__init__`` calls
    it by that name); the working lists were bound to ``_lowerCamelCase`` while
    the loop read ``cs``/``n`` (NameError), and ``bs.append(__A)`` referenced a
    parameter this function doesn't have (``b`` intended).
    """
    bs = (
        list(range(ord("""!"""), ord("""~""") + 1)) + list(range(ord("""¡"""), ord("""¬""") + 1)) + list(range(ord("""®"""), ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a sequence of symbols).

    Fixes: renamed to ``get_pairs`` (the ``bpe`` method calls it by that name);
    ``pairs`` and ``prev_char`` were bound to ``_lowerCamelCase`` while the loop
    read the real names (NameError).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['input_ids', 'attention_mask']
def __init__( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[Any]="replace" , _UpperCamelCase : Tuple="<s>" , _UpperCamelCase : Optional[int]="</s>" , _UpperCamelCase : Any="</s>" , _UpperCamelCase : Dict="<s>" , _UpperCamelCase : str="<unk>" , _UpperCamelCase : str="<pad>" , _UpperCamelCase : List[str]="<mask>" , _UpperCamelCase : Tuple=False , **_UpperCamelCase : int , ) ->Dict:
"""simple docstring"""
_lowerCamelCase : str = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else bos_token
_lowerCamelCase : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else eos_token
_lowerCamelCase : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else sep_token
_lowerCamelCase : Any = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else cls_token
_lowerCamelCase : Optional[int] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else unk_token
_lowerCamelCase : List[str] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else mask_token
super().__init__(
errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : Any = json.load(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
_lowerCamelCase : int = errors # how to handle errors in decoding
_lowerCamelCase : Any = bytes_to_unicode()
_lowerCamelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCamelCase , encoding="""utf-8""") as merges_handle:
_lowerCamelCase : Optional[int] = merges_handle.read().split("""\n""")[1:-1]
_lowerCamelCase : Optional[int] = [tuple(merge.split()) for merge in bpe_merges]
_lowerCamelCase : int = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCamelCase : Optional[Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
"""simple docstring"""
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str) ->List[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_lowerCamelCase : List[str] = tuple(_UpperCamelCase)
_lowerCamelCase : str = get_pairs(_UpperCamelCase)
if not pairs:
return token
while True:
_lowerCamelCase : int = min(_UpperCamelCase , key=lambda _UpperCamelCase: self.bpe_ranks.get(_UpperCamelCase , float("""inf""")))
if bigram not in self.bpe_ranks:
break
_lowerCamelCase , _lowerCamelCase : Optional[Any] = bigram
_lowerCamelCase : int = []
_lowerCamelCase : Any = 0
while i < len(_UpperCamelCase):
try:
_lowerCamelCase : Union[str, Any] = word.index(_UpperCamelCase , _UpperCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_lowerCamelCase : Optional[int] = j
if word[i] == first and i < len(_UpperCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_lowerCamelCase : Optional[Any] = tuple(_UpperCamelCase)
_lowerCamelCase : List[Any] = new_word
if len(_UpperCamelCase) == 1:
break
else:
_lowerCamelCase : Dict = get_pairs(_UpperCamelCase)
_lowerCamelCase : List[Any] = """ """.join(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Any) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : int = []
for token in re.findall(self.pat , _UpperCamelCase):
_lowerCamelCase : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase).split(""" """))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Union[str, Any]) ->List[Any]:
"""simple docstring"""
return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : List[Any]) ->List[Any]:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Dict) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = """""".join(_UpperCamelCase)
_lowerCamelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
_lowerCamelCase : Dict = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
_lowerCamelCase : Any = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
_lowerCamelCase : Union[str, Any] = 0
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""")
_lowerCamelCase : Dict = token_index
writer.write(""" """.join(_UpperCamelCase) + """\n""")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
_lowerCamelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase)
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase)) + [1]
return [1] + ([0] * len(_UpperCamelCase)) + [1, 1] + ([0] * len(_UpperCamelCase)) + [1]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
_lowerCamelCase : Dict = [self.sep_token_id]
_lowerCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=False , **_UpperCamelCase : str) ->int:
"""simple docstring"""
_lowerCamelCase : str = kwargs.pop("""add_prefix_space""" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase) > 0 and not text[0].isspace()):
_lowerCamelCase : Dict = """ """ + text
return (text, kwargs)
| 15 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# True when the installed torch predates 1.11 (the ONNX export kwargs changed there);
# the original assigned this to an unused name while L13707 reads `is_torch_less_than_1_11`.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    '''Export ``model`` to ONNX at ``output_path`` (parent dirs created).

    Renamed from the duplicated-placeholder signature (a SyntaxError) to match
    the keyword arguments used at the call site below (``model_args``,
    ``output_path``, ``ordered_input_names``, ``output_names``,
    ``dynamic_axes``, ``opset``).
    '''
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models(model_path: str , output_path: str , opset: int , fp16: bool = False):
    '''Export the VAE decoder of a `diffusers` checkpoint to ONNX.

    Renamed to ``convert_models`` to match the call in the ``__main__`` guard,
    and the duplicated-placeholder parameters (a SyntaxError) to their real
    names.
    '''
    # NOTE(review): the transcription had the same token on both dtype
    # branches; float16-when-fp16 is the only reading consistent with the
    # CUDA check below — confirm against the upstream script.
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""")
    else:
        device = """cpu"""
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + """/vae""")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25).to(device=device , dtype=dtype),
            False,
        ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
            """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the ONNX conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    # argparse stores `--fp16` as `args.fp16`; the original read a non-existent attribute.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 15 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : Dict =logging.getLogger()
def A__ ( ):
    '''Parse the ``-f`` flag (injected by some notebook/test launchers) and return its value.

    The transcription bound the parser and parsed namespace to throwaway names
    while reading ``parser``/``args`` — reconstructed here.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""")
    args = parser.parse_args()
    return args.f
class __snake_case ( __lowerCAmelCase ):
    '''Integration tests for the DeeBERT example (``run_glue_deebert.py``):
    two-stage training, per-highway evaluation, and entropy-based early exit
    on the MRPC fixture data.

    NOTE(review): throughout this class values are bound to ``_lowerCamelCase``
    but later read under other names (``args``, ``result``, ``n_gpu``) — this
    looks like transcription damage; confirm against the upstream example
    before running.
    '''
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->None:
        """Attach a stdout stream handler to the module logger so test logs are visible."""
        _lowerCamelCase : str = logging.StreamHandler(sys.stdout)
        # NOTE(review): `_UpperCamelCase` is not defined in this scope — the handler
        # created on the previous line was presumably intended here.
        logger.addHandler(_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : List[str]) ->Optional[Any]:
        """Run ``run_glue_deebert.main()`` with the given CLI args (single GPU only)
        and assert every returned metric is at least 0.666."""
        _lowerCamelCase : Dict = get_gpu_count()

        # NOTE(review): `n_gpu` is read here but the GPU count above was bound
        # to a different name — transcription damage, verify.
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            # NOTE(review): `args` is read here but the parameter is named
            # `_UpperCamelCase` — transcription damage, verify.
            args.insert(0 , """run_glue_deebert.py""")
            with patch.object(_UpperCamelCase , """argv""" , _UpperCamelCase):
                _lowerCamelCase : Tuple = run_glue_deebert.main()
                # NOTE(review): `result` is read below but the return value was
                # bound to a different name above.
                for value in result.values():
                    self.assertGreaterEqual(_UpperCamelCase , 0.6_6_6)
    @slow
    @require_torch_non_multi_gpu
    def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
        """End-to-end check: two-stage training, highway evaluation, then
        early-exit evaluation with an entropy threshold of 0.1."""
        _lowerCamelCase : Any = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(_UpperCamelCase)

        _lowerCamelCase : int = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(_UpperCamelCase)

        _lowerCamelCase : str = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(_UpperCamelCase)
| 15 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( donor_conc , acceptor_conc , intrinsic_conc , ):
    '''Return the built-in voltage (in volts) of a p-n junction at T = 300 K.

    Parameters are the donor, acceptor and intrinsic carrier concentrations;
    a ValueError is raised for non-positive values or when a doping
    concentration does not exceed the intrinsic concentration.  The original
    signature repeated one placeholder name for all three parameters (a
    SyntaxError); the body already used these names.
    '''
    if donor_conc <= 0:
        raise ValueError("""Donor concentration should be positive""" )
    elif acceptor_conc <= 0:
        raise ValueError("""Acceptor concentration should be positive""" )
    elif intrinsic_conc <= 0:
        raise ValueError("""Intrinsic concentration should be positive""" )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            """Donor concentration should be greater than intrinsic concentration""" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            """Acceptor concentration should be greater than intrinsic concentration""" )
    else:
        # V_bi = (kT / q) * ln(Nd * Na / ni^2); dividing by the electron-volt
        # constant converts the Joule result to volts.
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 15 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( BackboneConfigMixin , PretrainedConfig ):
    '''Configuration class for a Swin Transformer model / backbone.

    Defaults follow the microsoft/swin-tiny-patch4-window7-224 checkpoint.
    Fixes from the transcription: the two class attributes both named
    ``_snake_case`` (the second silently clobbered the first) are restored to
    the names ``PretrainedConfig`` actually reads, the undefined base-class
    placeholders are replaced by the mixins imported at the top of the file,
    and the constructor arguments are stored on ``self`` (they were bound to
    throwaway locals).
    '''

    # PretrainedConfig reads these exact class-attribute names.
    model_type = 'swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self : Optional[int] , image_size : Any=224 , patch_size : Any=4 , num_channels : Any=3 , embed_dim : Any=96 , depths : Any=[2, 2, 6, 2] , num_heads : Any=[3, 6, 12, 24] , window_size : Any=7 , mlp_ratio : Any=4.0 , qkv_bias : Any=True , hidden_dropout_prob : Any=0.0 , attention_probs_dropout_prob : Any=0.0 , drop_path_rate : Any=0.1 , hidden_act : Any="gelu" , use_absolute_embeddings : Any=False , initializer_range : Any=0.0_2 , layer_norm_eps : Any=1E-5 , encoder_stride : Any=32 , out_features : Any=None , out_indices : Any=None , **kwargs : Any , ) ->None:
        """Store architecture hyper-parameters and derive backbone metadata."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
class __snake_case ( OnnxConfig ):
    '''ONNX export configuration for Swin.

    Fixes from the transcription: the undefined base-class placeholder is
    replaced by the imported ``OnnxConfig``, and the two properties — which
    shared one name so the second shadowed the first — are restored to the
    attribute names the ONNX exporter actually reads.
    '''

    # Minimum torch version required to export this architecture.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs( self : Tuple) ->Mapping[str, Mapping[int, str]]:
        """Input name -> dynamic-axis mapping used by the ONNX exporter."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])

    @property
    def atol_for_validation( self : Union[str, Any]) ->float:
        """Absolute tolerance for validating the exported model against PyTorch."""
        return 1E-4
| 15 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    '''Tokenize one dataset example and record its chars-per-token ratio.

    Renamed from the placeholder to match the ``ds.map(tokenize, ...)`` call
    below.  NOTE(review): the output keys and ``truncation=False`` are
    reconstructed from a corrupted transcription (the dict was bound to a
    throwaway name and the truncation flag was replaced by the parameter) —
    they follow the upstream codeparrot preprocessing script; confirm.
    '''
    output = {}
    output["input_ids"] = tokenizer(example["""content"""] , truncation=False)["""input_ids"""]
    output["ratio_char_token"] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
# Script body: parse args, tokenize the dataset in parallel, push to the Hub.
# (The transcription bound every value to `lowerCAmelCase` while reading
# `parser`/`args`/`tokenizer`/`t_start`/`ds` — reconstructed here.)
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    '''Return a random password of ``length`` characters drawn from letters,
    digits and punctuation.

    Renamed from the placeholder to match the call in ``main()``; the
    transcription also drew characters from the integer length instead of the
    alphabet built on the previous line.
    '''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str , i: int) -> str:
    '''Build a password of total length ``i`` guaranteed to contain ``chars_incl``.

    The characters beyond ``chars_incl`` are split roughly evenly between
    letters, digits and punctuation, then the whole result is shuffled.
    Renamed from the placeholder to match the call in ``main()``; the unbound
    ``i``/``quotient``/``remainder``/``chars`` locals are restored.
    '''
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder)
        + random(digits , quotient)
        + random(punctuation , quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str , quotient: int) -> str:
    '''Return ``quotient`` characters drawn uniformly (secrets) from ``chars_incl``.

    Renamed from the placeholder to match the calls in
    ``alternative_password_generator``; the duplicated parameter names were a
    SyntaxError.
    '''
    return "".join(secrets.choice(chars_incl) for _ in range(quotient))
def random_number(chars_incl , i):
    '''Placeholder: intended to generate ``i`` random digits (unimplemented).

    Name taken from the adjacent comment mentioning ``random_number``; the
    duplicated parameter names were a SyntaxError.
    '''
    pass  # Put your code here...
def random_letters(chars_incl , i):
    '''Placeholder: intended to generate ``i`` random letters (unimplemented).

    Name taken from the adjacent comment mentioning ``random_letters``; the
    duplicated parameter names were a SyntaxError.
    '''
    pass  # Put your code here...
def random_characters(chars_incl , i):
    '''Placeholder: intended to generate ``i`` random special characters
    (unimplemented).

    Name taken from the adjacent comment mentioning ``random_characters``; the
    duplicated parameter names were a SyntaxError.
    '''
    pass  # Put your code here...
def is_strong_password(password: str , min_length: int = 8) -> bool:
    '''Return True when ``password`` is at least ``min_length`` long and mixes
    uppercase, lowercase, digits and punctuation.

    The duplicated parameter names were a SyntaxError; the body already used
    ``password``/``min_length``.
    '''
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowerase
    # numbers, and special characters
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def main():
    '''Interactively generate and print passwords (reads lengths/chars from stdin).

    Renamed from the placeholder because the ``__main__`` guard calls
    ``main()``; the unbound ``max_length``/``chars_incl`` locals are restored.
    '''
    max_length = int(input("""Please indicate the max length of your password: """ ).strip() )
    chars_incl = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(max_length) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(chars_incl , max_length) , )
    print("""[If you are thinking of using this passsword, You better save it.]""" )


if __name__ == "__main__":
    main()
| 15 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    '''Fast (CPU-capable) pipeline tests for the DeepFloyd IF text-to-image
    pipeline, driven by the shared PipelineTesterMixin / IFPipelineTesterMixin
    machinery.
    '''
    _snake_case = IFPipeline
    _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
    _snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
        """Return the dummy pipeline components supplied by IFPipelineTesterMixin."""
        return self._get_dummy_components()
    def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
        """Build deterministic pipeline-call kwargs for the given device/seed.

        MPS needs a CPU-side torch.manual_seed; other devices get a
        device-bound Generator.
        """
        if str(_UpperCamelCase).startswith("""mps"""):
            _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
        else:
            _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
        # NOTE(review): `generator` is read below but the Generator above was
        # bound to a different name — transcription damage, verify.
        _lowerCamelCase : Dict = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
        """Check save/load round-trips with optional components removed."""
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """Check float16 save/load parity (CUDA only)."""
        super().test_save_load_floataa(expected_max_diff=1E-1)
    def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
        """Check attention slicing matches the unsliced forward pass."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
        """Check local save/load round-trip."""
        self._test_save_load_local()
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
        """Check batched inference matches single-sample inference."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
        """Check the xFormers attention path matches the default attention."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    '''Slow GPU integration tests for the full DeepFloyd IF family:
    text-to-image, img2img and inpainting, each followed by its
    super-resolution stage, with peak-CUDA-memory budgets and reference-image
    comparisons.

    NOTE(review): many values below are bound to ``_lowerCamelCase`` while
    later lines read other names (``pipe_1``/``pipe_2`` style objects) —
    transcription damage; confirm against the upstream diffusers test before
    running.
    '''
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
        """Load stage-I and stage-II IF pipelines, pre-compute prompt embeddings
        (dropping T5 to save memory), then exercise text-to-image, img2img and
        inpainting variants built from the same components."""
        # pipe_a ==> stage 1, pipe_a ==> stage 2 in the transcription; the
        # distinct names were lost.
        _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
        _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)

        # pre compute text embeddings and remove T5 to save memory

        pipe_a.text_encoder.to("""cuda""")

        _lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")

        del pipe_a.tokenizer
        del pipe_a.text_encoder
        gc.collect()

        _lowerCamelCase : str = None
        _lowerCamelCase : str = None

        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()

        # AttnAddedKVProcessor: memory-friendlier attention processor.
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)

        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()

        # img2img

        _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
        _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)

        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()

        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)

        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()

        # inpainting

        _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
        _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)

        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()

        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
        """Run text-to-image stage I (64x64) and stage II (256x256), checking
        output shape, peak CUDA memory (13GB / 4GB) and mean pixel difference
        against reference arrays."""
        _start_torch_memory_measurement()

        _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCamelCase : Optional[Any] = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
        _lowerCamelCase : Optional[int] = output.images[0]

        assert image.shape == (64, 64, 3)

        _lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        _lowerCamelCase : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)

        # pipeline 2

        _start_torch_memory_measurement()

        _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)

        _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)

        _lowerCamelCase : str = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
        _lowerCamelCase : Any = output.images[0]

        assert image.shape == (256, 256, 3)

        _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        _lowerCamelCase : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
        """Run img2img stage I and its super-resolution stage II with the same
        shape / memory (10GB / 4GB) / reference-image checks."""
        _start_torch_memory_measurement()

        _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)

        _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)

        _lowerCamelCase : Dict = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
        _lowerCamelCase : Union[str, Any] = output.images[0]

        assert image.shape == (64, 64, 3)

        _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        _lowerCamelCase : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)

        # pipeline 2

        _start_torch_memory_measurement()

        _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)

        _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)

        _lowerCamelCase : Optional[Any] = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
        _lowerCamelCase : List[Any] = output.images[0]

        assert image.shape == (256, 256, 3)

        _lowerCamelCase : str = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        _lowerCamelCase : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
        """Run inpainting stage I and its super-resolution stage II (image +
        mask, then image + mask + original) with shape / memory /
        reference-image checks."""
        _start_torch_memory_measurement()

        _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)

        _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCamelCase : Any = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
        _lowerCamelCase : Any = output.images[0]

        assert image.shape == (64, 64, 3)

        _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        _lowerCamelCase : str = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)

        # pipeline 2

        _start_torch_memory_measurement()

        _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)

        _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
        _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)

        _lowerCamelCase : List[str] = pipe_a(
            prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
        _lowerCamelCase : Optional[Any] = output.images[0]

        assert image.shape == (256, 256, 3)

        _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        _lowerCamelCase : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _start_torch_memory_measurement():
    '''Reset CUDA memory statistics so subsequent peak-memory reads cover only
    the next pipeline run.

    Renamed from the placeholder to the name the test class above actually
    calls (``_start_torch_memory_measurement``).
    '''
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 15 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowerCAmelCase : Optional[int] =TypeVar("T")
class SegmentTree ( Generic[T] ):
    '''Iterative segment tree over an array, combining values with ``fnc``.

    Leaves live at indices N..2N-1 of ``self.st``; internal node ``p``
    combines its children ``2p`` and ``2p+1``.  Renamed from the placeholder
    class name (and the four identically-named methods) to the names the test
    harness below actually uses: ``SegmentTree``, ``build``, ``update``,
    ``query``.
    '''

    def __init__( self : Tuple , arr : list[T] , fnc : Callable[[T, T], T]) ->None:
        """Copy ``arr`` into the leaf slots and build the internal nodes."""
        any_type: Any | T = None
        self.N: int = len(arr)
        # st[0] is unused; slots 1..N-1 are internal nodes, N..2N-1 the leaves.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build( self : Optional[int]) ->None:
        """Fill every internal node from its two children, bottom-up."""
        for p in range(self.N - 1 , 0 , -1):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1])

    def update( self : List[Any] , p : int , v : T) ->None:
        """Set leaf ``p`` to ``v`` and recompute all of its ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1])

    def query( self : Optional[int] , l : int , r : int) ->T | None:  # noqa: E741
        """Combine the values on the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce

    # NOTE(review): this self-test has been damaged by automated renaming —
    # every binding below targets ``lowerCAmelCase``/``_lowerCamelCase`` while
    # the code reads ``test_array`` / ``test_updates`` / ``*_segment_tree`` /
    # ``__A``, and the tree class above exposes neither ``query`` nor
    # ``update`` (all of its methods share one name).  Restore the original
    # names before expecting this block to run.
    lowerCAmelCase : Optional[int] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    lowerCAmelCase : str = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    # Fixed: the tree class defined in this module is named ``__snake_case``;
    # ``SegmentTree`` was an unbound name (NameError).
    lowerCAmelCase : Union[str, Any] = __snake_case(test_array, min)
    lowerCAmelCase : Any = __snake_case(test_array, max)
    lowerCAmelCase : Any = __snake_case(test_array, lambda a, b: a + b)

    def A__ ( ):
        """Compare every possible range query against a functools.reduce oracle."""
        for i in range(len(__A ) ):
            for j in range(__A , len(__A ) ):
                _lowerCamelCase : Optional[int] = reduce(__A , test_array[i : j + 1] )
                _lowerCamelCase : Union[str, Any] = reduce(__A , test_array[i : j + 1] )
                # Fixed: the lambda previously declared two parameters with
                # the same name (``__A``), which is a SyntaxError.
                _lowerCamelCase : Union[str, Any] = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(__A , __A )
                assert max_range == max_segment_tree.query(__A , __A )
                assert sum_range == sum_segment_tree.query(__A , __A )

    test_all_segments()

    for index, value in test_updates.items():
        lowerCAmelCase : Union[str, Any] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 15 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
_lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
class __snake_case ( OnnxConfig ):
    """ONNX export configuration for Swin: input axes and validation tolerance.

    Fixed: the base was the undefined name ``__lowerCAmelCase``;
    ``OnnxConfig`` is imported at the top of this module.

    NOTE(review): both properties below share one name, so only the last
    definition survives — automated-renaming damage.
    """
    _snake_case = version.parse('1.11' )
    @property
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
        """Name the model input and its dynamic axes for ONNX export."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])
    @property
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
        """Absolute tolerance used when validating exported outputs."""
        return 1E-4
| 15 | 1 |
from __future__ import annotations
from math import pi
def A__ ( inductance , frequency , reactance ):
    """Solve X_L = 2*pi*f*L for whichever of the three quantities is 0.

    Exactly one argument must be 0 (the unknown); the result is returned as a
    one-entry dict keyed by the unknown's name.  Raises ValueError for
    negative inputs or when the number of zero arguments is not exactly one.

    Fixed: the original signature declared all three parameters as ``__A``
    (duplicate names — a SyntaxError) while the body read ``inductance`` /
    ``frequency`` / ``reactance``; the names are restored from those reads.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 15 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( SchedulerCommonTest ):
    """Test-suite specialization for ``EulerDiscreteScheduler``.

    Fixed: the base was the undefined name ``__lowerCAmelCase``;
    ``SchedulerCommonTest`` is imported at the top of this module.

    NOTE(review): every method below is named ``_SCREAMING_SNAKE_CASE`` (so
    only the last definition survives) and the bodies read names such as
    ``config`` / ``scheduler`` / ``sample`` that are bound only to throwaway
    ``_lowerCamelCase`` locals — automated-renaming damage; restore the
    original method and variable names before running.
    """
    _snake_case = (EulerDiscreteScheduler,)
    _snake_case = 10
    def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
        """Return the default scheduler config, updated with any overrides."""
        _lowerCamelCase : Optional[int] = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0_0_0_1,
            """beta_end""": 0.0_2,
            """beta_schedule""": """linear""",
        }
        config.update(**_UpperCamelCase)
        return config
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        """Smoke-test construction across several timestep counts."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
        """Smoke-test construction across several beta ranges."""
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
            self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
        """Smoke-test both supported beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
        """Smoke-test both supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
        """Full denoising loop; checks output statistics against references."""
        _lowerCamelCase : List[Any] = self.scheduler_classes[0]
        _lowerCamelCase : str = self.get_scheduler_config()
        _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        _lowerCamelCase : str = torch.manual_seed(0)
        _lowerCamelCase : str = self.dummy_model()
        _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : int = sample.to(_UpperCamelCase)
        for i, t in enumerate(scheduler.timesteps):
            _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : Dict = output.prev_sample
        _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
        """Full denoising loop using the v_prediction parameterization."""
        _lowerCamelCase : int = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
        _lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        _lowerCamelCase : Any = torch.manual_seed(0)
        _lowerCamelCase : int = self.dummy_model()
        _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : Dict = sample.to(_UpperCamelCase)
        for i, t in enumerate(scheduler.timesteps):
            _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : Tuple = output.prev_sample
        _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
        assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
        """Full denoising loop with timesteps placed on the target device."""
        _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
        _lowerCamelCase : int = self.get_scheduler_config()
        _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = torch.manual_seed(0)
        _lowerCamelCase : Tuple = self.dummy_model()
        _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        _lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
        for t in scheduler.timesteps:
            _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : List[Any] = output.prev_sample
        _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
    def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
        """Full denoising loop with Karras sigma spacing enabled."""
        _lowerCamelCase : List[str] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[int] = self.get_scheduler_config()
        _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
        scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
        _lowerCamelCase : int = torch.manual_seed(0)
        _lowerCamelCase : Tuple = self.dummy_model()
        _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
        for t in scheduler.timesteps:
            _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
            _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
            _lowerCamelCase : int = output.prev_sample
        _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
        assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
        assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 15 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def A__ ( token , num_runs=7 ):
    """Fetch the most recent scheduled runs of the transformers daily-CI workflow.

    Returns the ``workflow_runs`` list from the GitHub Actions REST API.

    Fixed: the original signature declared both parameters as ``__A``
    (duplicate names — a SyntaxError) while the body read ``token`` and
    ``num_runs``; locals previously bound to a throwaway ``_lowerCamelCase``
    are restored from the names the body reads.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def A__ ( __A ):
    """Return the id of the most recent *completed* daily-CI workflow run.

    NOTE(review): ``get_daily_ci_runs`` is not bound under that name in this
    module (its functions were all renamed to ``A__``) — confirm against the
    original source.  Fixed: the loop locals were previously bound to a
    throwaway ``_lowerCamelCase`` while the code read ``workflow_runs`` /
    ``workflow_run`` / ``workflow_run_id``.
    """
    workflow_runs = get_daily_ci_runs(__A)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def A__ ( artifact_names , output_dir , token ):
    """Download the named artifacts of the latest completed daily-CI run.

    Fixed: the original signature declared all three parameters as ``__A``
    (duplicate names — a SyntaxError); names are restored from the body's
    reads.  NOTE(review): ``get_last_daily_ci_runs`` is not bound under that
    name in this module — confirm against the original source.  The
    ``worflow_run_id`` keyword (sic) matches the imported helper's spelling.
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def A__ ( artifact_names , output_dir , token ):
    """Download the latest daily-CI artifacts and return their text contents.

    Returns ``{artifact_name: {member_filename: decoded_text}}`` for each
    artifact zip found in ``output_dir``.

    Fixed: the original signature declared all three parameters as ``__A``
    (duplicate names — a SyntaxError) and bound results to a throwaway
    ``_lowerCamelCase``; names are restored from the body's reads.
    NOTE(review): ``get_last_daily_ci_artifacts`` is not bound under that
    name in this module — confirm against the original source.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 15 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ={
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
"""simple docstring"""
return len(self.vocab)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = []
for s in text:
char_tokens.extend(_UpperCamelCase)
return char_tokens
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]:
"""simple docstring"""
return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase):
logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase))
return
_lowerCamelCase : Tuple = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
return (vocab_file,)
| 15 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['image_processor', 'tokenizer']
_snake_case = 'AutoImageProcessor'
_snake_case = 'AutoTokenizer'
def __init__( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any]) ->Tuple:
"""simple docstring"""
super().__init__(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = self.image_processor
def __call__( self : Union[str, Any] , _UpperCamelCase : List[str]=None , _UpperCamelCase : str=None , _UpperCamelCase : Optional[Any]=None , **_UpperCamelCase : Union[str, Any]) ->Any:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_lowerCamelCase : Union[str, Any] = self.tokenizer(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase)
if images is not None:
_lowerCamelCase : Any = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase)
if text is not None and images is not None:
_lowerCamelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCamelCase) , tensor_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *_UpperCamelCase : int , **_UpperCamelCase : Any) ->Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 15 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
    """Unit tests for the backbone feature/index alignment helpers.

    NOTE(review): the bodies below pass ``_UpperCamelCase`` positionally in
    several calls, a name that is unbound inside these methods — automated
    renaming damage; restore the original argument names before running.
    """
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
        """Check the feature/index alignment defaults and symmetry cases."""
        _lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
        # Defaults to last layer if both are None
        _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""c"""])
        self.assertEqual(_UpperCamelCase , [2])
        # Out indices set to match out features
        _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [0, 2])
        # Out features set to match out indices
        _lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [0, 2])
        # Out features selected from negative indices
        _lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [-3, -1])
    def _SCREAMING_SNAKE_CASE ( self : int) ->int:
        """Check that invalid feature/index combinations are rejected."""
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
        # Out features must be a list
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
        # Out features must be a subset of stage names
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
        # Out indices must be a list or tuple
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
        # Out indices must be a subset of stage names
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
        # Out features and out indices must be the same length
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
        # Out features should match out indices
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
        # Out features and out indices should be in order
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
    def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
        """Check BackboneMixin keeps out_features and out_indices in sync."""
        _lowerCamelCase : int = BackboneMixin()
        _lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
        _lowerCamelCase : Tuple = ["""a""", """c"""]
        _lowerCamelCase : List[Any] = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [0, 2])
        # Check out features and indices are updated correctly
        _lowerCamelCase : str = ["""a""", """b"""]
        self.assertEqual(backbone.out_features , ["""a""", """b"""])
        self.assertEqual(backbone.out_indices , [0, 1])
        _lowerCamelCase : Optional[int] = [-3, -1]
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __snake_case :
    """A binary-tree node holding an integer payload and optional children.

    Fixed: the original body bound a plain class attribute ``_snake_case``
    three times and declared no dataclass fields at all, so construction
    with arguments (as done throughout this module) raised TypeError.  The
    field names are restored from the ``.data`` / ``.left`` / ``.right``
    reads in every traversal function below.
    """
    data: int
    left: __snake_case | None = None
    right: __snake_case | None = None
def A__ ( ):
    """Build the sample tree  1 -> (2 -> (4, 5), 3)  used by the demo below.

    Fixed: the original created five unconnected nodes via an undefined
    ``Node`` name (the node class in this module is ``__snake_case``) and
    returned an unbound ``tree`` variable.
    """
    tree = __snake_case(1)
    tree.left = __snake_case(2)
    tree.right = __snake_case(3)
    tree.left.left = __snake_case(4)
    tree.left.right = __snake_case(5)
    return tree
def A__ ( __A ):
    """Return the pre-order (node, left subtree, right subtree) values.

    Fixed: the body previously read an unbound ``root`` and recursed through
    an unbound ``preorder`` name; both now use this function's actual
    parameter and name.
    """
    return [__A.data, *A__(__A.left), *A__(__A.right)] if __A else []
def A__ ( __A ):
    """Return the post-order (left subtree, right subtree, node) values.

    Fixed: the body previously read an unbound ``root`` and recursed through
    an unbound ``postorder`` name.
    """
    return A__(__A.left) + A__(__A.right) + [__A.data] if __A else []
def A__ ( __A ):
    """Return the in-order (left subtree, node, right subtree) values.

    Fixed: the body previously read an unbound ``root`` and recursed through
    an unbound ``inorder`` name.
    """
    return [*A__(__A.left), __A.data, *A__(__A.right)] if __A else []
def A__ ( __A ):
    """Return the height of the tree (0 for an empty tree).

    Fixed: the body previously read an unbound ``root`` and recursed through
    an unbound ``height`` name.
    """
    return (max(A__(__A.left), A__(__A.right)) + 1) if __A else 0
def A__ ( __A ):
    """Return values in breadth-first (level) order using a FIFO queue.

    Fixed: the locals were previously bound to a throwaway ``_lowerCamelCase``
    while the code read ``output`` / ``process_queue`` / ``node`` and an
    unbound ``root``; names are restored from those reads.
    """
    output: list[Any] = []
    if __A is None:
        return output
    process_queue = deque([__A])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def A__ ( root , level ):
    """Return the values at ``level`` (1-based, root is level 1), left to right.

    Fixed: the original signature declared both parameters as ``__A``
    (duplicate names — a SyntaxError); the names are restored from the reads
    inside the nested helper.
    """
    output: list[Any] = []

    def populate_output(root, level) -> None:
        # Depth-first descent that collects nodes exactly at the target level.
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def A__ ( root , level ):
    """Return the values at ``level`` (1-based, root is level 1), right to left.

    Fixed: the original signature declared both parameters as ``__A``
    (duplicate names — a SyntaxError); the names are restored from the reads
    inside the nested helper.
    """
    output: list[Any] = []

    def populate_output(root, level) -> None:
        # Mirror of the left-to-right walk: visit the right child first.
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def A__ ( __A ):
    """Return the level lists in zig-zag order (alternating L->R and R->L).

    NOTE(review): this body is damaged by automated renaming — locals are
    bound to ``_lowerCamelCase`` while the code reads ``output`` / ``flag`` /
    ``height_tree``, and it calls ``height`` / ``get_nodes_from_left_to_right``
    / ``get_nodes_from_right_to_left``, names not bound in this module (its
    functions were all renamed to ``A__``).  Restore the original names
    before expecting it to run.
    """
    if root is None:
        return []
    _lowerCamelCase : list[Sequence[Node | None]] = []
    # flag == 0 -> emit the next level left-to-right, flag == 1 -> right-to-left.
    _lowerCamelCase : str = 0
    _lowerCamelCase : Optional[Any] = height(__A )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(__A , __A ) )
            _lowerCamelCase : List[Any] = 1
        else:
            output.append(get_nodes_from_right_to_left(__A , __A ) )
            _lowerCamelCase : List[Any] = 0
    return output
def A__ ( ): # Main function for testing.
    """Demo entry point: build the sample tree and print every traversal.

    NOTE(review): every helper invoked here (``make_tree``, ``inorder``,
    ``preorder``, ``postorder``, ``height``, ``level_order``,
    ``get_nodes_from_left_to_right``, ``zigzag``) is unbound in this module
    because the renaming pass collapsed all functions to ``A__`` — restore
    the original names before running.
    """
    _lowerCamelCase : Dict = make_tree()
    print(F"""In-order Traversal: {inorder(__A )}""" )
    print(F"""Pre-order Traversal: {preorder(__A )}""" )
    print(F"""Post-order Traversal: {postorder(__A )}""" , """\n""" )
    print(F"""Height of Tree: {height(__A )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(__A ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(__A ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(__A , level=__A ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(__A ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``main`` is not bound in this module (the demo entry
    # point above is named ``A__``) — confirm against the original source.
    main()
| 15 | import math
def A__ ( __A ):
    """Return True iff the given non-negative integer is prime.

    Uses trial division by 2 and by the odd numbers up to sqrt(n).

    Fixed: the original asserted ``isinstance(__A , __A)`` (always a
    TypeError) and read an unbound ``number`` name; both now refer to the
    actual parameter.
    """
    assert isinstance(__A, int) and (
        __A >= 0
    ), "'number' must been an int and positive"
    if 1 < __A < 4:
        # 2 and 3 are primes
        return True
    elif __A < 2 or not __A % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(__A) + 1), 2)
    return not any(not __A % i for i in odd_numbers)
def A__ ( value , factor=1 , **kwargs ):
    """Search from ``factor * value`` for a prime, stepping up (or down with
    ``desc=True``); if the starting value is already prime, restart the
    search from the next integer so a strictly different prime is returned.

    Fixed: the original signature declared duplicate ``__A`` parameters (a
    SyntaxError) and bound results to a throwaway ``_lowerCamelCase``; names
    are restored from the body's reads.  NOTE(review): ``is_prime`` and
    ``next_prime`` are not bound under those names in this module (its
    functions were renamed to ``A__``) — confirm against the original source.
    """
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 15 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Tuple =logging.get_logger(__name__)
lowerCAmelCase : Dict ={
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'detr'
_snake_case = ['past_key_values']
_snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Any , _UpperCamelCase : Dict=True , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Any=3 , _UpperCamelCase : Optional[Any]=100 , _UpperCamelCase : int=6 , _UpperCamelCase : Optional[Any]=2048 , _UpperCamelCase : Union[str, Any]=8 , _UpperCamelCase : str=6 , _UpperCamelCase : List[str]=2048 , _UpperCamelCase : Optional[int]=8 , _UpperCamelCase : str=0.0 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Any=True , _UpperCamelCase : Optional[Any]="relu" , _UpperCamelCase : Tuple=256 , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Optional[Any]=0.0 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : Dict=1.0 , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Dict="sine" , _UpperCamelCase : Dict="resnet50" , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=False , _UpperCamelCase : str=1 , _UpperCamelCase : int=5 , _UpperCamelCase : Optional[int]=2 , _UpperCamelCase : List[Any]=1 , _UpperCamelCase : Union[str, Any]=1 , _UpperCamelCase : str=5 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : List[str]=0.1 , **_UpperCamelCase : Any , ) ->str:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
_lowerCamelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
elif isinstance(_UpperCamelCase , _UpperCamelCase):
_lowerCamelCase : Dict = backbone_config.get("""model_type""")
_lowerCamelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Tuple = config_class.from_dict(_UpperCamelCase)
# set timm attributes to None
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = None, None, None
_lowerCamelCase : Optional[int] = use_timm_backbone
_lowerCamelCase : Union[str, Any] = backbone_config
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Optional[int] = num_queries
_lowerCamelCase : Tuple = d_model
_lowerCamelCase : Union[str, Any] = encoder_ffn_dim
_lowerCamelCase : Optional[Any] = encoder_layers
_lowerCamelCase : str = encoder_attention_heads
_lowerCamelCase : List[Any] = decoder_ffn_dim
_lowerCamelCase : List[str] = decoder_layers
_lowerCamelCase : Tuple = decoder_attention_heads
_lowerCamelCase : List[str] = dropout
_lowerCamelCase : Any = attention_dropout
_lowerCamelCase : List[str] = activation_dropout
_lowerCamelCase : Optional[int] = activation_function
_lowerCamelCase : List[Any] = init_std
_lowerCamelCase : Optional[Any] = init_xavier_std
_lowerCamelCase : Optional[Any] = encoder_layerdrop
_lowerCamelCase : Optional[int] = decoder_layerdrop
_lowerCamelCase : Union[str, Any] = encoder_layers
_lowerCamelCase : List[Any] = auxiliary_loss
_lowerCamelCase : int = position_embedding_type
_lowerCamelCase : int = backbone
_lowerCamelCase : Optional[int] = use_pretrained_backbone
_lowerCamelCase : List[str] = dilation
# Hungarian matcher
_lowerCamelCase : int = class_cost
_lowerCamelCase : Optional[Any] = bbox_cost
_lowerCamelCase : int = giou_cost
# Loss coefficients
_lowerCamelCase : str = mask_loss_coefficient
_lowerCamelCase : List[str] = dice_loss_coefficient
_lowerCamelCase : Dict = bbox_loss_coefficient
_lowerCamelCase : Optional[int] = giou_loss_coefficient
_lowerCamelCase : int = eos_coefficient
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase)
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->int:
"""simple docstring"""
return self.d_model
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] , _UpperCamelCase : PretrainedConfig , **_UpperCamelCase : List[str]) ->List[Any]:
"""simple docstring"""
return cls(backbone_config=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict[str, any]:
"""simple docstring"""
_lowerCamelCase : Tuple = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_lowerCamelCase : List[str] = self.backbone_config.to_dict()
_lowerCamelCase : List[Any] = self.__class__.model_type
return output
class __snake_case ( OnnxConfig ):
    """ONNX export configuration for DETR: inputs, tolerance and opset.

    Fixed: the base was the undefined name ``__lowerCAmelCase``;
    ``OnnxConfig`` is imported at the top of this module.

    NOTE(review): the three properties below share one name, so only the
    last definition survives — automated-renaming damage.
    """
    _snake_case = version.parse('1.11' )
    @property
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
        """Name the model inputs and their dynamic axes for ONNX export."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ])
    @property
    def _SCREAMING_SNAKE_CASE ( self : int) ->float:
        """Absolute tolerance used when validating exported outputs."""
        return 1E-5
    @property
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
        """Default ONNX opset version for export."""
        return 12
| 15 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
# Module-level logger (the mangled binding `lowerCAmelCase` was never read).
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __snake_case ( Pipeline ):
    """Image classification pipeline.

    Accepts a single image (PIL image, local path or URL) or a list of them
    and returns the top-k class labels with their softmax scores.

    Fixes applied (the block was machine-mangled):
      * duplicate ``_UpperCamelCase`` parameter names (a ``SyntaxError``)
        restored to meaningful names;
      * hook methods renamed to the ``Pipeline`` contract
        (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` /
        ``postprocess``) — the decorator argument and base class are the
        ``PIPELINE_INIT_ARGS`` / ``Pipeline`` names imported above;
      * intermediate results are now stored in the names later lines read;
      * ``config.idalabel`` corrected to ``config.id2label``.
    """

    def __init__(self, *args, **kwargs) ->None:
        super().__init__(*args, **kwargs)
        requires_backends(self, """vision""")
        # Restrict to image-classification model heads for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        """Route the ``top_k`` call kwarg to the postprocess step."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Classify the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        """Load the image and run the image processor -> framework tensors."""
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """Forward pass through the classification model."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Softmax the logits and return the top-k ``{score, label}`` dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 15 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude, angle, radian_mode=False):
    """Resolve a force of given magnitude and direction into ``[Fx, Fy]``.

    ``angle`` is interpreted in degrees unless ``radian_mode`` is True.
    Fix: the mangled ``def A__(__A, __A, __A=False)`` duplicated parameter
    names (a ``SyntaxError``) and both functions here shared the name ``A__``
    while the ``__main__`` block calls ``polar_force`` /
    ``in_static_equilibrium`` — the intended names are restored.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps=10**-1):
    """Return True when the net moment of ``forces`` applied at ``location`` is ~0."""
    # z-components of r x F for each (application point, force) pair.
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Fix: the driver read ``forces``/``location`` but the arrays were bound
    # to the mangled names ``lowerCAmelCase`` (NameError as written).

    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    '''Tests for the MGP-STR processor (char tokenizer + ViT image processor).

    NOTE(review): throughout this class, values are bound to throwaway names
    (``_lowerCamelCase``) while later statements read names that are never
    assigned (e.g. ``self.tmpdirname``, ``self.vocab_file``, ``processor``,
    and ``_UpperCamelCase`` inside zero-argument methods).  The bindings look
    machine-mangled, so the tests cannot run as written — verify against the
    upstream test module before relying on any of them.
    '''
    # Image-processor class under test (None when vision deps are missing).
    _snake_case = ViTImageProcessor if is_vision_available() else None
    @property
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        """Return the image-processor kwargs supplied by the tester helper."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
        """Set-up: write a char vocab file and an image-processor config into a temp dir."""
        _lowerCamelCase : Union[str, Any] = (3, 32, 128)
        _lowerCamelCase : str = tempfile.mkdtemp()
        # fmt: off
        _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
        _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(_UpperCamelCase) + """\n""")
        _lowerCamelCase : Any = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
        with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
            json.dump(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
        """Load an ``MgpstrTokenizer`` from the temp dir created in set-up."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
        """Load a ``ViTImageProcessor`` from the temp dir created in set-up."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """Tear-down: remove the temp dir."""
        shutil.rmtree(self.tmpdirname)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
        """Build a single random RGB PIL image to feed the processor."""
        # NOTE(review): ``np.uinta`` looks mangled from ``np.uint8`` — confirm.
        _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
        _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
        return image_input
    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """Round-trip: save a default processor and reload it; components must match."""
        _lowerCamelCase : List[str] = self.get_tokenizer()
        _lowerCamelCase : Tuple = self.get_image_processor()
        _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        processor.save_pretrained(self.tmpdirname)
        _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        """Round-trip with overridden kwargs (special tokens, normalization)."""
        _lowerCamelCase : Dict = self.get_tokenizer()
        _lowerCamelCase : Optional[Any] = self.get_image_processor()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        processor.save_pretrained(self.tmpdirname)
        _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
        _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
        _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
        """Processor image output should match the bare image processor's output."""
        _lowerCamelCase : int = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : List[str] = self.prepare_image_inputs()
        _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
        _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
        """Processor text output should match the bare tokenizer's output."""
        _lowerCamelCase : List[Any] = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Optional[int] = """test"""
        _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
        _lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """Joint text+image call should yield pixel_values and labels; empty call raises."""
        _lowerCamelCase : Union[str, Any] = self.get_image_processor()
        _lowerCamelCase : List[Any] = self.get_tokenizer()
        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Any = """test"""
        _lowerCamelCase : List[str] = self.prepare_image_inputs()
        _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
        self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
        # test if it raises when no input is passed
        with pytest.raises(_UpperCamelCase):
            processor()
    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """``char_decode`` should equal the tokenizer's batch_decode minus spaces."""
        _lowerCamelCase : Union[str, Any] = self.get_image_processor()
        _lowerCamelCase : List[str] = self.get_tokenizer()
        _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
        _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
        _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
        self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
        """Image-only call should expose exactly ``processor.model_input_names``."""
        _lowerCamelCase : Dict = self.get_image_processor()
        _lowerCamelCase : str = self.get_tokenizer()
        _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : int = None
        _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
        _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        """``batch_decode`` over char/bpe/wordpiece logits returns the combined dict."""
        _lowerCamelCase : List[str] = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()
        _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        _lowerCamelCase : Any = torch.randn(1 , 27 , 38)
        _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
        _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
        _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
# Module-level logger: the class below logs through the name ``logger``
# (e.g. ``logger.info`` during calibration), but the mangled line bound it to
# ``lowerCAmelCase`` — restore the intended name.
logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class __snake_case ( __lowerCAmelCase ):
    '''Trainer specialization for quantization-aware question answering.

    Adds a calibration pass for post-training quantization, QA-specific
    post-processing in the evaluate/predict loops, and ONNX export of the
    (quantized) model.

    NOTE(review): this block appears machine-mangled — several signatures
    repeat the parameter name ``_UpperCamelCase`` (duplicate argument names
    are a ``SyntaxError``), and locals are bound to ``_lowerCamelCase`` while
    later statements read the intended names (``calib_dataloader``, ``model``,
    ``metrics``, ``output`` ...).  It cannot run as written; verify against
    the upstream ``trainer_quant_qa.py`` before relying on it.
    '''
    def __init__( self : Any , *_UpperCamelCase : Dict , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Optional[int]=None , **_UpperCamelCase : Union[str, Any]) ->int:
        """Store QA eval examples, the post-processing hook and quant-trainer args."""
        super().__init__(*_UpperCamelCase , **_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = eval_examples
        _lowerCamelCase : Dict = post_process_function
        _lowerCamelCase : int = quant_trainer_args
        _lowerCamelCase : Union[str, Any] = 128 # default number of calibration samples
    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Optional[int]=None) ->Optional[int]:
        """Build a shuffled ``DataLoader`` over the calibration dataset."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("""Trainer: calibration requires an calib_dataset.""")
        _lowerCamelCase : Union[str, Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
        _lowerCamelCase : str = self._remove_unused_columns(_UpperCamelCase , description="""Calibration""")
        return DataLoader(
            _UpperCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_UpperCamelCase , )
    def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : int=None) ->Optional[int]:
        """Run calibration forward passes, then finalize the quantizer ranges."""
        _lowerCamelCase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset
        _lowerCamelCase : Optional[int] = self.get_calib_dataloader(_UpperCamelCase)
        _lowerCamelCase : List[str] = self.model
        quant_trainer.configure_model(_UpperCamelCase , self.quant_trainer_args , calib=_UpperCamelCase)
        model.eval()
        quant_trainer.enable_calibration(_UpperCamelCase)
        logger.info("""***** Running calibration *****""")
        logger.info(F""" Num examples = {self.calib_num}""")
        logger.info(F""" Batch size = {calib_dataloader.batch_size}""")
        for step, inputs in enumerate(_UpperCamelCase):
            # Prediction step
            _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = self.prediction_step(_UpperCamelCase , _UpperCamelCase , prediction_loss_only=_UpperCamelCase)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(_UpperCamelCase , self.quant_trainer_args)
        _lowerCamelCase : Any = model
    def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : str = "eval") ->Union[str, Any]:
        """Evaluation loop with QA post-processing; returns the metrics dict."""
        _lowerCamelCase : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
        _lowerCamelCase : Dict = self.get_eval_dataloader(_UpperCamelCase)
        _lowerCamelCase : int = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        _lowerCamelCase : Dict = self.compute_metrics
        _lowerCamelCase : List[str] = None
        _lowerCamelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            _lowerCamelCase : List[Any] = eval_loop(
                _UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCamelCase , )
        finally:
            _lowerCamelCase : List[Any] = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            _lowerCamelCase : Dict = self.post_process_function(_UpperCamelCase , _UpperCamelCase , output.predictions)
            _lowerCamelCase : int = self.compute_metrics(_UpperCamelCase)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F"""{metric_key_prefix}_"""):
                    _lowerCamelCase : List[str] = metrics.pop(_UpperCamelCase)
            self.log(_UpperCamelCase)
        else:
            _lowerCamelCase : List[Any] = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        _lowerCamelCase : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCamelCase)
        return metrics
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : str = "test") ->Union[str, Any]:
        """Prediction loop with QA post-processing; returns a ``PredictionOutput``."""
        _lowerCamelCase : Tuple = self.get_test_dataloader(_UpperCamelCase)
        # Temporarily disable metric computation, we will do it in the loop here.
        _lowerCamelCase : Optional[int] = self.compute_metrics
        _lowerCamelCase : Dict = None
        _lowerCamelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            _lowerCamelCase : Union[str, Any] = eval_loop(
                _UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCamelCase , )
        finally:
            _lowerCamelCase : Tuple = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        _lowerCamelCase : Optional[Any] = self.post_process_function(_UpperCamelCase , _UpperCamelCase , output.predictions , """predict""")
        _lowerCamelCase : Union[str, Any] = self.compute_metrics(_UpperCamelCase)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F"""{metric_key_prefix}_"""):
                _lowerCamelCase : List[Any] = metrics.pop(_UpperCamelCase)
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCamelCase)
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : List[str]="./") ->Union[str, Any]:
        """Export the (quantized) model to ONNX at ``<output_dir>/model.onnx``."""
        _lowerCamelCase : List[Any] = self.eval_dataset
        _lowerCamelCase : List[Any] = self.get_eval_dataloader(_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = next(iter(_UpperCamelCase))
        # saving device - to make it consistent
        _lowerCamelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
        # convert to tuple
        _lowerCamelCase : Optional[Any] = tuple(v.to(_UpperCamelCase) for k, v in batch.items())
        logger.info("""Converting model to be onnx compatible""")
        from pytorch_quantization.nn import TensorQuantizer
        _lowerCamelCase : Optional[int] = True
        _lowerCamelCase : Union[str, Any] = self.model.to(_UpperCamelCase)
        model.eval()
        model.float()
        _lowerCamelCase : Any = model.module if hasattr(_UpperCamelCase , """module""") else model
        quant_trainer.configure_model(_UpperCamelCase , self.quant_trainer_args)
        _lowerCamelCase : Tuple = os.path.join(_UpperCamelCase , """model.onnx""")
        logger.info(F"""exporting model to {output_model_file}""")
        _lowerCamelCase : List[str] = {0: """batch_size""", 1: """seq_len"""}
        torch.onnx.export(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , export_params=_UpperCamelCase , opset_version=13 , do_constant_folding=_UpperCamelCase , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
                """input_ids""": axes,
                """attention_mask""": axes,
                """token_type_ids""": axes,
                """output_start_logits""": axes,
                """output_end_logits""": axes,
            } , verbose=_UpperCamelCase , )
        logger.info("""onnx export finished""")
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse launcher CLI: ``--num_cores``, the training script path, and all
    pass-through arguments for the training script.

    Fixes: the parser result was bound to a throwaway name while later lines
    read ``parser``; ``type=__A`` / ``nargs=__A`` restored to ``type=int`` /
    ``type=str`` / ``nargs=REMAINDER`` (``REMAINDER`` is imported above); the
    function is named as ``main`` calls it.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )

    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )

    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )

    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """Spawn ``args.num_cores`` TPU processes running the target script's ``_mp_fn``.

    Fix: intermediates were bound to ``_lowerCamelCase`` while later lines read
    the intended names, and the patched argv list was never assigned to
    ``sys.argv``.
    """
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the target script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 15 | 1 |
class Graph:
    """Directed graph stored as an adjacency-list dict ``{vertex: [neighbours]}``.

    Fix: the mangled class bound every value to ``_lowerCamelCase`` (so
    ``self.vertex`` / ``visited`` were never written) and named every method
    ``_SCREAMING_SNAKE_CASE``; names are restored to the ones the ``__main__``
    driver calls: ``Graph``, ``add_edge``, ``print_graph``, ``dfs``.
    """

    def __init__(self) ->None:
        self.vertex = {}

    def print_graph(self) ->None:
        """Print the raw dict, then each vertex with its adjacency chain."""
        print(self.vertex)
        for i in self.vertex:
            print(i , """ -> """ , """ -> """.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) ->None:
        """Append a directed edge; create the source's list on first use."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) ->None:
        """Depth-first traversal over all vertices (assumes keys are 0..n-1)."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) ->None:
        """Mark and print ``start_vertex``, then recurse into unvisited vertices."""
        visited[start_vertex] = True
        print(start_vertex , end=""" """)
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    # Fix: the driver read ``g`` but the instance was bound to ``lowerCAmelCase``.
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    #  0 1 2 3
def naive_cut_rod_recursive(n, prices):
    """Exponential-time recursive solution to the rod-cutting problem.

    ``prices[i - 1]`` is the price of a rod piece of length ``i``; returns the
    maximum revenue obtainable for a rod of length ``n``.

    Fix: every ``def A__(__A, __A)`` duplicated parameter names (a
    ``SyntaxError``) and all six definitions shared the name ``A__`` while
    the call sites use the real names — the intended names are restored.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("""-inf""")
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices) )
    return max_revue


def top_down_cut_rod(n, prices):
    """Memoized (top-down DP) rod cutting; O(n^2) after memoization."""
    _enforce_args(n, prices)
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n, prices, max_rev):
    """Recursive helper for ``top_down_cut_rod``: fill ``max_rev`` on demand."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("""-inf""")
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev) , )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n, prices):
    """Iterative (bottom-up DP) rod cutting; O(n^2) time, O(n) space."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n, prices):
    """Validate ``n >= 0`` and that every piece length has a price."""
    if n < 0:
        msg = F"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            F"""Got n = {n} but length of prices = {len(prices)}"""
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 15 | 1 |
import numpy as np
import qiskit
def bbaa(key_len = 8, seed = None):
    """Simulate the BB84 quantum key distribution protocol and return a key of
    ``key_len`` bits as a string of '0'/'1'.

    Fix: ``def A__(__A = 8, __A = None)`` duplicated parameter names (a
    ``SyntaxError``) and every intermediate was bound to ``_lowerCamelCase``
    while later lines read the intended names; the ``__main__`` block calls
    ``bbaa(8, seed=0)``, grounding the restored function name.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len , """0""" )
    return key


if __name__ == "__main__":
    print(F"""The generated key is : {bbaa(8, seed=0)}""")

    from doctest import testmod

    testmod()
from __future__ import annotations
class XORCipher:
    """Symmetric XOR cipher over single characters.

    Fix: every method signature duplicated the parameter name
    ``_UpperCamelCase`` (a ``SyntaxError``) and locals were bound to
    ``_lowerCamelCase`` while the bodies read ``key``/``ans``; names are
    restored to the ones the usage comments below the class call
    (``XORCipher``, ``encrypt``, ``decrypt_string``, ``encrypt_file`` ...).
    """

    def __init__(self, key: int = 0) ->None:
        # Default key used whenever a call passes key 0 / falsy.
        self.__key = key

    def encrypt(self, content: str, key: int) ->list:
        """XOR each character of ``content`` with ``key``; returns a char list."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list, key: int) ->list:
        """Inverse of :meth:`encrypt` (XOR is its own inverse)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) ->str:
        """XOR-encrypt ``content`` and return the result as a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) ->str:
        """Inverse of :meth:`encrypt_string`."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) ->bool:
        """Encrypt ``file`` into ``encrypt.out``; returns False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("""encrypt.out""" , """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) ->bool:
        """Decrypt ``file`` into ``decrypt.out``; returns False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("""decrypt.out""" , """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 1 |
# (trailing dataset-viewer page residue removed; not part of any source file)