| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
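Each row pairs two source snippets (`code`, `style_context`) with two integer style ids and a binary `label`. A minimal sketch of that schema with the `datasets` library follows; the hub path in the load call is a placeholder assumption, since the dataset's actual name is not given here.

```python
from datasets import Features, Value, load_dataset

# Schema matching the column header above: two code strings, two style ids, one binary label.
features = Features(
    {
        "code": Value("string"),
        "code_codestyle": Value("int64"),
        "style_context": Value("string"),
        "style_context_codestyle": Value("int64"),
        "label": Value("int64"),  # 0 or 1
    }
)

# Hypothetical dataset path -- replace "user/code-style-pairs" with the real one.
ds = load_dataset("user/code-style-pairs", split="train", features=features)
print(ds[0]["label"], len(ds[0]["code"]))
```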
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__magic_name__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
def lowerCamelCase ( ):
A_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""-f""")
A_ : Any = parser.parse_args()
return args.f
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="eval"):
A_ : List[str] = os.path.join(__A , F'{split}_results.json')
if os.path.exists(__A):
with open(__A , """r""") as f:
return json.load(__A)
raise ValueError(F'can\'t find {path}')
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( _snake_case ):
'''simple docstring'''
def _a ( self : Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
A_ : List[str] = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(lowerCAmelCase__ ,"""argv""" ,lowerCAmelCase__ ):
run_flax_glue.main()
A_ : Dict = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
@slow
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[int] = self.get_auto_remove_tmp_dir()
A_ : List[Any] = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(lowerCAmelCase__ ,"""argv""" ,lowerCAmelCase__ ):
run_clm_flax.main()
A_ : Union[str, Any] = get_results(lowerCAmelCase__ )
self.assertLess(result["""eval_perplexity"""] ,100 )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.get_auto_remove_tmp_dir()
A_ : Tuple = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(lowerCAmelCase__ ,"""argv""" ,lowerCAmelCase__ ):
run_summarization_flax.main()
A_ : Optional[int] = get_results(lowerCAmelCase__ ,split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] ,10 )
self.assertGreaterEqual(result["""test_rouge2"""] ,2 )
self.assertGreaterEqual(result["""test_rougeL"""] ,7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] ,7 )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.get_auto_remove_tmp_dir()
A_ : Tuple = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(lowerCAmelCase__ ,"""argv""" ,lowerCAmelCase__ ):
run_mlm_flax.main()
A_ : Any = get_results(lowerCAmelCase__ )
self.assertLess(result["""eval_perplexity"""] ,42 )
@slow
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = self.get_auto_remove_tmp_dir()
A_ : List[str] = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(lowerCAmelCase__ ,"""argv""" ,lowerCAmelCase__ ):
run_ta_mlm_flax.main()
A_ : List[Any] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.42 )
@slow
def _a ( self : int ):
'''simple docstring'''
A_ : str = 7 if get_gpu_count() > 1 else 2
A_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
A_ : Tuple = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(lowerCAmelCase__ ,"""argv""" ,lowerCAmelCase__ ):
run_flax_ner.main()
A_ : Dict = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertGreaterEqual(result["""eval_f1"""] ,0.3 )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Any = self.get_auto_remove_tmp_dir()
A_ : Any = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(lowerCAmelCase__ ,"""argv""" ,lowerCAmelCase__ ):
run_qa.main()
A_ : Tuple = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["""eval_f1"""] ,30 )
self.assertGreaterEqual(result["""eval_exact"""] ,30 )
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
| 27 | 0 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__magic_name__ = '\\n Text data.\n Second line of data.'
__magic_name__ = 'file'
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : Optional[int] = tmp_path_factory.mktemp("""data""") / (FILE_PATH + """.zstd""")
A_ : Optional[Any] = bytes(_SCREAMING_SNAKE_CASE , """utf-8""")
with zstd.open(_SCREAMING_SNAKE_CASE , """wb""") as f:
f.write(_SCREAMING_SNAKE_CASE)
return path
@pytest.fixture
def lowerCamelCase ( lowerCamelCase : Optional[int]):
with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE) , """w""") as f:
f.write(_SCREAMING_SNAKE_CASE)
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""])
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Dict):
A_ : str = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A_ : Dict = input_paths[compression_format]
A_ : Optional[Any] = tmp_path / """cache"""
A_ : str = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE)
A_ : Dict = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE)
with open(_SCREAMING_SNAKE_CASE) as f:
A_ : Union[str, Any] = f.read()
with open(_SCREAMING_SNAKE_CASE) as f:
A_ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False])
@pytest.mark.parametrize("""default_cache_dir""" , [True, False])
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : Tuple):
A_ : Any = """custom_cache"""
A_ : Optional[Any] = """custom_extracted_dir"""
A_ : List[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _SCREAMING_SNAKE_CASE)
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE))
A_ : str = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ : Tuple = xz_file
A_ : Optional[Any] = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE)
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE)
)
A_ : Optional[int] = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE)
assert Path(_SCREAMING_SNAKE_CASE).parent.parts[-2:] == expected
def lowerCamelCase ( lowerCamelCase : Tuple):
# absolute path
A_ : Tuple = str(Path(_SCREAMING_SNAKE_CASE).resolve())
assert cached_path(_SCREAMING_SNAKE_CASE) == text_file
# relative path
A_ : Union[str, Any] = str(Path(_SCREAMING_SNAKE_CASE).resolve().relative_to(Path(os.getcwd())))
assert cached_path(_SCREAMING_SNAKE_CASE) == text_file
def lowerCamelCase ( lowerCamelCase : List[str]):
# absolute path
A_ : str = str(tmp_path.resolve() / """__missing_file__.txt""")
with pytest.raises(_SCREAMING_SNAKE_CASE):
cached_path(_SCREAMING_SNAKE_CASE)
# relative path
A_ : List[str] = """./__missing_file__.txt"""
with pytest.raises(_SCREAMING_SNAKE_CASE):
cached_path(_SCREAMING_SNAKE_CASE)
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Dict = get_from_cache(F'tmp://{tmpfs_file}')
with open(_SCREAMING_SNAKE_CASE) as f:
A_ : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE)
def lowerCamelCase ( ):
with pytest.raises(_SCREAMING_SNAKE_CASE):
cached_path("""https://huggingface.co""")
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE)
def lowerCamelCase ( lowerCamelCase : Any):
A_ : List[str] = tmp_path_factory.mktemp("""data""") / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE):
http_get("""https://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE)
with pytest.raises(_SCREAMING_SNAKE_CASE):
http_head("""https://huggingface.co""")
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE)
def lowerCamelCase ( lowerCamelCase : int):
A_ : Dict = tmp_path_factory.mktemp("""data""") / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE):
ftp_get("""ftp://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE)
with pytest.raises(_SCREAMING_SNAKE_CASE):
ftp_head("""ftp://huggingface.co""")
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE)
def lowerCamelCase ( lowerCamelCase : Optional[int]):
A_ : List[str] = tmp_path_factory.mktemp("""data""") / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE):
fsspec_get("""s3://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE)
with pytest.raises(_SCREAMING_SNAKE_CASE):
fsspec_head("""s3://huggingface.co""")
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
__magic_name__ = "3"
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Optional[int]):
return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__magic_name__ = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 0 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[int] = "cpu" , lowerCamelCase : Any = None):
A_ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_)
for k, v in tqdm(state_dict.items()):
if not isinstance(lowerCamelCase_ , torch.Tensor):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""")
A_ : int = v.half()
if save_path is None: # overwrite src_path
A_ : List[Any] = src_path
torch.save(lowerCamelCase_ , lowerCamelCase_)
if __name__ == "__main__":
fire.Fire(convert)
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__magic_name__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__magic_name__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__magic_name__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : List[str] = len([g for position, g in enumerate(lowerCamelCase) if g == main_target[position]])
return (item, float(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Dict = random.randint(0 , len(lowerCamelCase) - 1)
A_ : List[Any] = parent_a[:random_slice] + parent_a[random_slice:]
A_ : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : list[str]):
A_ : int = list(lowerCamelCase)
if random.uniform(0 , 1) < MUTATION_PROBABILITY:
A_ : List[str] = random.choice(lowerCamelCase)
return "".join(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : tuple[str, float] , lowerCamelCase : list[tuple[str, float]] , lowerCamelCase : list[str] , ):
A_ : int = []
# Generate more children proportionally to the fitness score.
A_ : List[str] = int(parent_a[1] * 100) + 1
A_ : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(lowerCamelCase):
A_ : List[str] = population_score[random.randint(0 , lowerCamelCase)][0]
A_ : str = crossover(parent_a[0] , lowerCamelCase)
# Append new string to the population list.
pop.append(mutate(lowerCamelCase , lowerCamelCase))
pop.append(mutate(lowerCamelCase , lowerCamelCase))
return pop
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : list[str] , lowerCamelCase : bool = True):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
A_ : str = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(lowerCamelCase)
# Verify that the target contains no genes besides the ones inside genes variable.
A_ : List[Any] = sorted({c for c in target if c not in genes})
if not_in_genes_list:
A_ : Any = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(lowerCamelCase)
# Generate random starting population.
A_ : Tuple = []
for _ in range(lowerCamelCase):
population.append("""""".join([random.choice(lowerCamelCase) for i in range(len(lowerCamelCase))]))
# Just some logs to know what the algorithms is doing.
A_ , A_ : Optional[Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCamelCase)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
A_ : Optional[Any] = [evaluate(lowerCamelCase , lowerCamelCase) for item in population]
# Check if there is a matching evolution.
A_ : int = sorted(lowerCamelCase , key=lambda lowerCamelCase: x[1] , reverse=lowerCamelCase)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}')
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
A_ : int = population[: int(N_POPULATION / 3)]
population.clear()
population.extend(lowerCamelCase)
# Normalize population score to be between 0 and 1.
A_ : Union[str, Any] = [
(item, score / len(lowerCamelCase)) for item, score in population_score
]
# This is selection
for i in range(lowerCamelCase):
population.extend(select(population_score[int(lowerCamelCase)] , lowerCamelCase , lowerCamelCase))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(lowerCamelCase) > N_POPULATION:
break
if __name__ == "__main__":
__magic_name__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
__magic_name__ = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
__magic_name__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 707 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__magic_name__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__magic_name__ = BeautifulSoup(res.text, 'html.parser')
__magic_name__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 0 |
'''simple docstring'''
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : List[str]=0):
if name is None:
A_ : Optional[Any] = None
else:
A_ : int = '''.''' * max(0 , spaces - 2) + '''# {:''' + str(50 - spaces) + '''s}'''
A_ : Optional[int] = fmt.format(__A)
# Print and recurse (if needed).
if isinstance(__A , __A):
if msg is not None:
print(__A)
for k in val.keys():
recursive_print(__A , val[k] , spaces + 2)
elif isinstance(__A , torch.Tensor):
print(__A , """:""" , val.size())
else:
print(__A , """:""" , __A)
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
A_ : int = (num_heads, hidden_size, num_splits) + input_shape[1:]
A_ : int = param.view(*__A)
A_ : Optional[Any] = param.transpose(0 , 2)
A_ : Union[str, Any] = param.transpose(1 , 2).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
A_ : List[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
A_ : Tuple = param.view(*__A)
A_ : Any = param.transpose(0 , 1).contiguous()
A_ : str = param.view(*__A)
return param
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]):
A_ : Union[str, Any] = {}
# old versions did not store training args
A_ : int = input_state_dict.get("""args""" , __A)
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
A_ : Tuple = ds_args.padded_vocab_size
A_ : Any = ds_args.max_position_embeddings
A_ : Dict = ds_args.hidden_size
A_ : Any = ds_args.num_layers
A_ : Dict = ds_args.num_attention_heads
A_ : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
A_ : Any = config.n_head
# The hidden_size per head.
A_ : Optional[int] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
A_ : str = input_state_dict['''checkpoint_version''']
else:
A_ : Any = 0.0
# The model.
A_ : Tuple = input_state_dict['''model''']
# The language model.
A_ : Any = model['''language_model''']
# The embeddings.
A_ : Union[str, Any] = lm['''embedding''']
# The word embeddings.
A_ : Optional[Any] = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
A_ : Dict = word_embeddings[: config.vocab_size, :]
A_ : str = word_embeddings
# The position embeddings.
A_ : int = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
A_ : List[str] = pos_embeddings.size(0)
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match')
# Store the position embeddings.
A_ : List[Any] = pos_embeddings
# The transformer.
A_ : Dict = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
A_ : Dict = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""")
# The simple map of names for "automated" rules.
A_ : List[str] = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
A_ : Optional[Any] = layer_re.match(__A)
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
A_ : List[Any] = int(m.group(1))
# The name of the operation.
A_ : Dict = m.group(2)
# Is it a weight or a bias?
A_ : Any = m.group(3)
# The name of the layer.
A_ : int = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm"""):
A_ : List[Any] = '''ln_1''' if op_name.startswith("""input""") else '''ln_2'''
A_ : Any = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
A_ : List[str] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa)).view(
1 , 1 , __A , __A)
A_ : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
A_ : Tuple = torch.tensor(-1E4 , dtype=torch.floataa)
A_ : int = masked_bias
A_ : Optional[Any] = fix_query_key_value_ordering(__A , __A , 3 , __A , __A)
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
A_ : Tuple = out_val.transpose(0 , 1).contiguous()
# Store.
A_ : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
A_ : List[str] = fix_query_key_value_ordering(__A , __A , 3 , __A , __A)
# Store. No change of shape.
A_ : Union[str, Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
A_ : List[str] = megatron_to_transformers[op_name]
A_ : Any = val.transpose(0 , 1)
# Copy the bias.
elif weight_or_bias == "bias":
A_ : str = megatron_to_transformers[op_name]
A_ : str = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
A_ : List[Any] = transformer['''final_layernorm.weight''']
A_ : str = transformer['''final_layernorm.bias''']
# For LM head, transformers' wants the matrix to weight embeddings.
A_ : List[str] = word_embeddings
# It should be done!
return output_state_dict
def lowerCamelCase ( ):
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""")
parser.add_argument(
"""path_to_checkpoint""" , type=__A , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__A , help="""An optional config json file describing the pre-trained model.""" , )
A_ : Dict = parser.parse_args()
# Extract the basename.
A_ : Dict = os.path.dirname(args.path_to_checkpoint)
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}')
if args.path_to_checkpoint.endswith(""".zip"""):
with zipfile.ZipFile(args.path_to_checkpoint , """r""") as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""") as pytorch_dict:
A_ : Optional[Any] = torch.load(__A , map_location="""cpu""")
else:
A_ : str = torch.load(args.path_to_checkpoint , map_location="""cpu""")
A_ : List[Any] = input_state_dict.get("""args""" , __A)
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
A_ : Optional[Any] = '''gelu_fast'''
elif ds_args.openai_gelu:
A_ : Tuple = '''gelu_new'''
else:
A_ : str = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
A_ : Dict = '''gelu_new'''
# Spell out all parameters in case the defaults change.
A_ : List[str] = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__A , summary_activation=__A , summary_proj_to_labels=__A , summary_first_dropout=0.1 , scale_attn_weights=__A , use_cache=__A , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
A_ : List[Any] = GPTaConfig.from_json_file(args.config_file)
A_ : Any = ['''GPT2LMHeadModel''']
# Convert.
print("""Converting""")
A_ : Dict = convert_megatron_checkpoint(__A , __A , __A)
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__A , __A)
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
A_ : Tuple = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
A_ : Optional[int] = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
A_ : Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}')
else:
A_ : List[Any] = '''gpt2'''
A_ : Optional[int] = AutoTokenizer.from_pretrained(__A)
A_ : Any = type(__A).__name__
A_ : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""")
config.save_pretrained(__A)
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files')
tokenizer.save_pretrained(__A)
# Store the state_dict to file.
A_ : Any = os.path.join(__A , """pytorch_model.bin""")
print(F'Saving checkpoint to "{output_checkpoint_file}"')
torch.save(__A , __A)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 708 |
'''simple docstring'''
from ... import PretrainedConfig
__magic_name__ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 27 | 0 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Tuple=True):
model.train()
A_ : int = model(__lowerCAmelCase)
A_ : Any = F.mse_loss(__lowerCAmelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCAmelCase)
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any=False):
set_seed(42)
A_ : List[Any] = RegressionModel()
A_ : str = deepcopy(__lowerCAmelCase)
A_ : Dict = RegressionDataset(length=80)
A_ : str = DataLoader(__lowerCAmelCase , batch_size=16)
model.to(accelerator.device)
if sched:
A_ : Any = AdamW(params=model.parameters() , lr=1E-3)
A_ : str = AdamW(params=ddp_model.parameters() , lr=1E-3)
A_ : List[Any] = LambdaLR(__lowerCAmelCase , lr_lambda=lambda lowerCamelCase: epoch**0.65)
A_ : Optional[Any] = LambdaLR(__lowerCAmelCase , lr_lambda=lambda lowerCamelCase: epoch**0.65)
# Make a copy of `model`
if sched:
A_ , A_ , A_ , A_ : int = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
else:
A_ , A_ : List[str] = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCamelCase ( lowerCamelCase : Any):
# Test when on a single CPU or GPU that the context manager does nothing
A_ , A_ , A_ : List[Any] = get_training_setup(__lowerCAmelCase)
# Use a single batch
A_ , A_ : Union[str, Any] = next(iter(__lowerCAmelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
A_ , A_ : List[Any] = accelerator.gather((ddp_input, ddp_target))
A_ , A_ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
A_ : List[str] = ddp_input[torch.randperm(len(__lowerCAmelCase))]
def lowerCamelCase ( lowerCamelCase : Tuple):
# Test on distributed setup that context manager behaves properly
A_ , A_ , A_ : List[str] = get_training_setup(__lowerCAmelCase)
# Use a single batch
A_ , A_ : Union[str, Any] = next(iter(__lowerCAmelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
A_ , A_ : Dict = accelerator.gather((ddp_input, ddp_target))
A_ , A_ : List[Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
A_ : Optional[int] = ddp_input[torch.randperm(len(__lowerCAmelCase))]
def lowerCamelCase ( lowerCamelCase : int=False , lowerCamelCase : Optional[int]=False):
A_ : Tuple = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
A_ , A_ , A_ : Dict = get_training_setup(__lowerCAmelCase)
for iteration, batch in enumerate(__lowerCAmelCase):
A_ , A_ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
A_ , A_ : str = accelerator.gather((ddp_input, ddp_target))
A_ , A_ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCAmelCase):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
A_ : Union[str, Any] = ddp_input[torch.randperm(len(__lowerCAmelCase))]
GradientState._reset_state()
def lowerCamelCase ( lowerCamelCase : Optional[Any]=False , lowerCamelCase : Union[str, Any]=False):
A_ : str = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
A_ , A_ , A_ , A_ , A_ , A_ , A_ : List[Any] = get_training_setup(__lowerCAmelCase , __lowerCAmelCase)
for iteration, batch in enumerate(__lowerCAmelCase):
A_ , A_ : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
A_ , A_ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
A_ , A_ : int = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCAmelCase):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
A_ : Any = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def lowerCamelCase ( ):
A_ : List[Any] = Accelerator()
A_ : Dict = RegressionDataset(length=80)
A_ : Any = DataLoader(__lowerCAmelCase , batch_size=16)
A_ : int = RegressionDataset(length=96)
A_ : Any = DataLoader(__lowerCAmelCase , batch_size=16)
A_ , A_ : Union[str, Any] = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCAmelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(__lowerCAmelCase)
if iteration < len(__lowerCAmelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCAmelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(__lowerCAmelCase)
if batch_num < len(__lowerCAmelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCamelCase ( ):
A_ : int = Accelerator()
A_ : int = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""")
test_noop_sync(__lowerCAmelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""")
test_distributed_sync(__lowerCAmelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase)
def lowerCamelCase ( lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of vertices reachable from `start`."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
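# For contrast with the note above, a minimal breadth-first search sketch
# (illustrative addition, not part of the original module): vertices are taken from
# the front of a queue and marked as explored when they are enqueued.
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set:
    explored, queue = set(), deque([start])
    explored.add(start)
    while queue:
        v = queue.popleft()
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored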
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

# Maps ONNX Runtime tensor type strings to the corresponding NumPy dtypes.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : Optional[int]=None ,**_a : Optional[Any] ):
'''simple docstring'''
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
A_ : List[str] = model
A_ : Tuple = kwargs.get("""model_save_dir""" ,UpperCamelCase_ )
A_ : int = kwargs.get("""latest_model_name""" ,UpperCamelCase_ )
def __call__( self : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
@staticmethod
def _a ( _a : Tuple ,_a : List[Any]=None ,_a : Tuple=None ):
'''simple docstring'''
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
A_ : Dict = "CPUExecutionProvider"
return ort.InferenceSession(UpperCamelCase_ ,providers=[provider] ,sess_options=UpperCamelCase_ )
def _a ( self : str ,_a : Tuple ,_a : Union[str, Any] = None ,**_a : List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A_ : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name )
A_ : str = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ ,UpperCamelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A_ : Optional[int] = self.model_save_dir.joinpath(UpperCamelCase_ )
if src_path.exists():
A_ : int = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ ,UpperCamelCase_ )
except shutil.SameFileError:
pass
def _a ( self : Dict ,_a : Tuple ,**_a : Any ,):
'''simple docstring'''
if os.path.isfile(UpperCamelCase_ ):
logger.error(f'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(UpperCamelCase_ ,exist_ok=UpperCamelCase_ )
# saving model weights/files
self._save_pretrained(UpperCamelCase_ ,**UpperCamelCase_ )
@classmethod
def _a ( cls : str ,_a : List[str] ,_a : int = None ,_a : List[Any] = None ,_a : List[Any] = False ,_a : List[Any] = None ,_a : Union[str, Any] = None ,_a : int = None ,_a : str = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCamelCase_ ):
A_ : Optional[int] = OnnxRuntimeModel.load_model(
os.path.join(UpperCamelCase_ ,UpperCamelCase_ ) ,provider=UpperCamelCase_ ,sess_options=UpperCamelCase_ )
A_ : Tuple = Path(UpperCamelCase_ )
# load model from hub
else:
# download model
A_ : Tuple = hf_hub_download(
repo_id=UpperCamelCase_ ,filename=UpperCamelCase_ ,use_auth_token=UpperCamelCase_ ,revision=UpperCamelCase_ ,cache_dir=UpperCamelCase_ ,force_download=UpperCamelCase_ ,)
A_ : Optional[Any] = Path(UpperCamelCase_ ).parent
A_ : int = Path(UpperCamelCase_ ).name
A_ : Optional[Any] = OnnxRuntimeModel.load_model(UpperCamelCase_ ,provider=UpperCamelCase_ ,sess_options=UpperCamelCase_ )
return cls(model=UpperCamelCase_ ,**UpperCamelCase_ )
@classmethod
def _a ( cls : Dict ,_a : int ,_a : Any = True ,_a : Any = None ,_a : List[Any] = None ,**_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Dict = None
if len(str(UpperCamelCase_ ).split("""@""" ) ) == 2:
A_ : int = model_id.split("""@""" )
return cls._from_pretrained(
model_id=UpperCamelCase_ ,revision=UpperCamelCase_ ,cache_dir=UpperCamelCase_ ,force_download=UpperCamelCase_ ,use_auth_token=UpperCamelCase_ ,**UpperCamelCase_ ,)
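# Minimal usage sketch (illustrative only; the checkpoint path and the input name below
# are assumptions, not taken from this file):
#
#   model = OnnxRuntimeModel.from_pretrained("path/to/onnx_model", file_name="model.onnx")
#   outputs = model(input=np.ones((1, 3, 224, 224), dtype=np.float32))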
| 710 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 27 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = 'Hello, World!'
__magic_name__ = 'en_XX'
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : bool):
A_ : Dict = Path("""data_bin""")
A_ : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase).parent) , checkpoint_file=Path(_lowerCAmelCase).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase).parent / """sentencepiece.bpe.model""") , src_dict=str(data_dir / """dict.txt""") , )
xmod.eval() # disable dropout
print(_lowerCAmelCase)
A_ : Tuple = xmod.model.encoder.sentence_encoder
A_ : List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A_ : List[Any] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase)
A_ : Optional[Any] = XmodForSequenceClassification(_lowerCAmelCase) if classification_head else XmodForMaskedLM(_lowerCAmelCase)
model.eval()
# Now let's copy all the weights.
# Embeddings
A_ : str = xmod_sent_encoder.embed_tokens.weight
A_ : List[Any] = xmod_sent_encoder.embed_positions.weight
A_ : Optional[int] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
A_ : Any = xmod_sent_encoder.layernorm_embedding.weight
A_ : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
A_ : Optional[Any] = model.roberta.encoder.layer[i]
A_ : str = xmod_sent_encoder.layers[i]
# self attention
A_ : Any = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError("""Dimensions of self-attention weights do not match.""")
A_ : Tuple = xmod_layer.self_attn.q_proj.weight
A_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
A_ : Tuple = xmod_layer.self_attn.k_proj.weight
A_ : Optional[int] = xmod_layer.self_attn.k_proj.bias
A_ : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
A_ : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A_ : Dict = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""")
A_ : List[Any] = xmod_layer.self_attn.out_proj.weight
A_ : Union[str, Any] = xmod_layer.self_attn.out_proj.bias
A_ : str = xmod_layer.self_attn_layer_norm.weight
A_ : Tuple = xmod_layer.self_attn_layer_norm.bias
# intermediate
A_ : int = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""")
A_ : Any = xmod_layer.fca.weight
A_ : Dict = xmod_layer.fca.bias
# output
A_ : Tuple = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""")
A_ : str = xmod_layer.fca.weight
A_ : Any = xmod_layer.fca.bias
A_ : List[str] = xmod_layer.final_layer_norm.weight
A_ : List[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A_ : Union[str, Any] = xmod_layer.adapter_layer_norm.weight
A_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError("""Lists of language adapters do not match.""")
for lang_code, adapter in xmod_layer.adapter_modules.items():
A_ : Optional[int] = bert_output.adapter_modules[lang_code]
A_ : List[Any] = xmod_layer.adapter_modules[lang_code]
A_ : int = from_adapter.fca.weight
A_ : Dict = from_adapter.fca.bias
A_ : int = from_adapter.fca.weight
A_ : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A_ : str = xmod_sent_encoder.layer_norm.weight
A_ : Optional[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
A_ : int = xmod.model.classification_heads["mnli"].dense.weight
A_ : Any = xmod.model.classification_heads["mnli"].dense.bias
A_ : List[Any] = xmod.model.classification_heads["mnli"].out_proj.weight
A_ : Optional[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
A_ : Any = xmod.model.encoder.lm_head.dense.weight
A_ : Any = xmod.model.encoder.lm_head.dense.bias
A_ : Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
A_ : Optional[int] = xmod.model.encoder.lm_head.layer_norm.bias
A_ : Optional[int] = xmod.model.encoder.lm_head.weight
A_ : List[Any] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A_ : Any = xmod.encode(_lowerCAmelCase).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase)
A_ : int = model(_lowerCAmelCase)[0]
if classification_head:
A_ : List[str] = xmod.model.classification_heads["mnli"](xmod.extract_features(_lowerCAmelCase))
else:
A_ : str = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
A_ : Dict = torch.max(torch.abs(our_output - their_output)).item()
print(F'max_absolute_diff = {max_absolute_diff}') # ~ 1e-7
A_ : Dict = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3)
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""")
if not success:
raise Exception("""Something went wRoNg""")
Path(_lowerCAmelCase).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase)
print(F'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(_lowerCAmelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__magic_name__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 711 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
| 27 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
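# Worked example (illustrative): a square lamina with outer width 3 and a 1x1 hole uses
# 3*3 - 1*1 = 8 tiles; with outer width 4 and a 2x2 hole it uses 16 - 4 = 12 tiles.
# solution() counts how many tile totals t <= t_limit can be formed by at least one and
# at most n_limit distinct laminae.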
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 712 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
'''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 27 | 0 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Dict):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True])
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Tuple):
A_ : Optional[int] = tmp_path / """cache"""
A_ : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[int] = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase).read()
_check_sql_dataset(__UpperCAmelCase , __UpperCAmelCase)
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str]):
A_ : List[str] = tmp_path / """cache"""
A_ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Dict = features.copy() if features else default_expected_features
A_ : Optional[int] = (
Features({feature: Value(__UpperCAmelCase) for feature, dtype in features.items()}) if features is not None else None
)
A_ : Union[str, Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase).read()
_check_sql_dataset(__UpperCAmelCase , __UpperCAmelCase)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int]):
A_ : Any = tmp_path / """cache"""
A_ : Optional[int] = os.path.join(__UpperCAmelCase , """tmp.sql""")
A_ : Union[str, Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__UpperCAmelCase).read()
SqlDatasetWriter(__UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1).write()
A_ : Dict = iter_sql_file(__UpperCAmelCase)
A_ : Dict = iter_sql_file(__UpperCAmelCase)
for rowa, rowa in zip(__UpperCAmelCase , __UpperCAmelCase):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : Dict):
A_ : List[str] = tmp_path / """cache"""
A_ : List[str] = os.path.join(__UpperCAmelCase , """tmp.sql""")
A_ : List[Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__UpperCAmelCase).read()
SqlDatasetWriter(__UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2).write()
A_ : List[str] = iter_sql_file(__UpperCAmelCase)
A_ : Tuple = iter_sql_file(__UpperCAmelCase)
for rowa, rowa in zip(__UpperCAmelCase , __UpperCAmelCase):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any]):
A_ : Any = tmp_path / """cache"""
A_ : List[str] = os.path.join(__UpperCAmelCase , """tmp.sql""")
A_ : Union[str, Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__UpperCAmelCase).read()
with pytest.raises(__UpperCAmelCase):
SqlDatasetWriter(__UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0).write()
| 713 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (given as a string) using the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, variable))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
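# Note on `multiplicity`: for a root of multiplicity m, the plain Newton step converges only
# linearly; multiplying the correction by m, as done above, restores quadratic convergence
# near that root (modified Newton's method).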
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__magic_name__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
__magic_name__ = []
__magic_name__ = []
__magic_name__ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
__magic_name__ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
'emoji': True,
},
}
]
__magic_name__ = 0
for log in Path().glob('*.log'):
__magic_name__ = 0
with open(log, 'r') as f:
for line in f:
__magic_name__ = json.loads(line)
if line.get('nodeid', '') != "":
__magic_name__ = line['nodeid']
if line.get('duration', None) is not None:
__magic_name__ = f"""{line['duration']:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__magic_name__ = []
log.unlink()
__magic_name__ = ''
__magic_name__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
__magic_name__ = []
__magic_name__ = {}
for test in failed_tests:
__magic_name__ = test[0].split('::')
__magic_name__ = data[0].split('/')[-1]
if data[0] not in filesafailed:
__magic_name__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__magic_name__ = [test[0] for test in failed_table]
__magic_name__ = list(set(files))
# Count number of instances in failed_tests
__magic_name__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__magic_name__ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
__magic_name__ = 'Too many failed tests, please see the full report in the Action results.'
__magic_name__ = len(err) + 10
__magic_name__ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
__magic_name__ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
__magic_name__ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
__magic_name__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
__magic_name__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
__magic_name__ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
__magic_name__ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
__magic_name__ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__magic_name__ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
__magic_name__ = row[0]
else:
__magic_name__ = ''
__magic_name__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 714 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
'''simple docstring'''
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
'''simple docstring'''
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
        label_freqs = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
| 27 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetImgaImgPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 32
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _a ( self : Dict ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Dict ):
'''simple docstring'''
return 100
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : List[Any] = UNetaDConditionModel(**_UpperCamelCase )
return model
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : str ):
'''simple docstring'''
A_ : Union[str, Any] = self.dummy_unet
A_ : Dict = self.dummy_movq
A_ : List[str] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A_ : Dict = DDIMScheduler(**_UpperCamelCase )
A_ : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Tuple ,_a : List[Any] ,_a : Any=0 ):
'''simple docstring'''
A_ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
A_ : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_UpperCamelCase )
# create init_image
A_ : int = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
A_ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : int = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
if str(_UpperCamelCase ).startswith("""mps""" ):
A_ : Any = torch.manual_seed(_UpperCamelCase )
else:
A_ : Any = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
A_ : List[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : str = """cpu"""
A_ : Tuple = self.get_dummy_components()
A_ : Union[str, Any] = self.pipeline_class(**_UpperCamelCase )
A_ : str = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
A_ : List[str] = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
A_ : Any = output.images
A_ : Dict = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) ,return_dict=_UpperCamelCase ,)[0]
A_ : Optional[int] = image[0, -3:, -3:, -1]
A_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : str = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
A_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : int = torch.from_numpy(np.array(_UpperCamelCase ) ).float() / 255.0
A_ : Dict = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : Tuple = """A robot, 4k photo"""
A_ : Optional[int] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_UpperCamelCase )
A_ : str = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : List[str] = pipeline.to(_UpperCamelCase )
pipeline.set_progress_bar_config(disable=_UpperCamelCase )
A_ : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ : Union[str, Any] = pipe_prior(
_UpperCamelCase ,image=_UpperCamelCase ,strength=0.85 ,generator=_UpperCamelCase ,negative_prompt="""""" ,).to_tuple()
A_ : Optional[Any] = pipeline(
image=_UpperCamelCase ,image_embeds=_UpperCamelCase ,negative_image_embeds=_UpperCamelCase ,hint=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=100 ,height=512 ,width=512 ,strength=0.5 ,output_type="""np""" ,)
A_ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_UpperCamelCase ,_UpperCamelCase )
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError(f"{num}: Invalid input, please enter a positive integer.")

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    # Everything still marked True above sqrt(num) is prime
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
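
# Hedged examples (added for illustration): the sieve includes the bound itself when it is prime.
if __name__ == "__main__":
    assert prime_sieve(7) == [2, 3, 5, 7]
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]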
| 27 | 0 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__magic_name__ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__magic_name__ = 'main'
# Default branch name
__magic_name__ = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
__magic_name__ = 'aaaaaaa'
# This commit does not exist, so we should 404.
__magic_name__ = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
__magic_name__ = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def lowerCamelCase ( ):
print("""Welcome!""")
yield
print("""Bye!""")
@contextlib.contextmanager
def lowerCamelCase ( ):
print("""Bonjour!""")
yield
print("""Au revoir!""")
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch("""sys.stdout""" ,new_callable=io.StringIO )
def _a ( self : int ,_a : int ):
'''simple docstring'''
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,"""Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" ,new_callable=io.StringIO )
def _a ( self : Any ,_a : Optional[int] ):
'''simple docstring'''
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,"""Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" ,new_callable=io.StringIO )
def _a ( self : Optional[Any] ,_a : Dict ):
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,"""Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def _a ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(find_labels(_A ) ,["""labels"""] )
self.assertEqual(find_labels(_A ) ,["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(_A ) ,["""start_positions""", """end_positions"""] )
class __lowerCAmelCase ( a__ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_A ) ,["""labels"""] )
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(find_labels(_A ) ,["""labels"""] )
self.assertEqual(find_labels(_A ) ,["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(_A ) ,["""start_positions""", """end_positions"""] )
class __lowerCAmelCase ( a__ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_A ) ,["""labels"""] )
@require_flax
def _a ( self : List[str] ):
'''simple docstring'''
self.assertEqual(find_labels(_A ) ,[] )
self.assertEqual(find_labels(_A ) ,[] )
self.assertEqual(find_labels(_A ) ,[] )
class __lowerCAmelCase ( a__ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_A ) ,[] )
| 716 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs to the GPU
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """Hash table whose slots hold deques, so colliding keys chain their values."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Start a fresh deque for an empty slot, then push the new value to the front.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
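
# Hedged usage sketch (added for illustration): the constructor arguments and `insert_data`
# below come from the parent HashTable API and are assumed to match the usual implementation.
def _example_usage():
    table = HashTableWithLinkedList(size_table=3, charge_factor=2)
    for value in (10, 20, 30):
        table.insert_data(value)
    # Each occupied slot now holds a deque; colliding values chain front-first.
    return table.values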
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
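
# Hedged usage note (added for illustration): with the lazy module in place, heavy submodules are
# only imported when one of the exported names is first accessed, e.g.
#   from transformers import ConvNextConfig, ConvNextModel
#   model = ConvNextModel(ConvNextConfig())          # random weights from a default config
# Loading pretrained weights would instead go through ConvNextModel.from_pretrained(...).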
| 27 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[Any] = 1
A_ : Any = 3
A_ : Optional[int] = (32, 32)
A_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def _a ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Any = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _a ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
def extract(*_a : Union[str, Any] ,**_a : List[str] ):
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any ):
'''simple docstring'''
A_ : Any = torch.ones([0] )
def _a ( self : Any ,_a : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : Any = self.dummy_cond_unet
A_ : int = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
A_ : Optional[int] = self.dummy_vae
A_ : Optional[int] = self.dummy_text_encoder
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
A_ : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
A_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : Optional[int] = '''A painting of a squirrel eating a burger'''
A_ : Tuple = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
A_ : List[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
A_ : Tuple = output.images
A_ : Tuple = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
A_ : Union[str, Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
A_ : Any = image[0, -3:, -3:, -1]
A_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : int ):
'''simple docstring'''
A_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : int = self.dummy_cond_unet
A_ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
A_ : List[str] = self.dummy_vae
A_ : Optional[int] = self.dummy_text_encoder
A_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
A_ : Optional[int] = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
A_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : str = '''A painting of a squirrel eating a burger'''
A_ : Dict = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
A_ : Optional[int] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
A_ : List[Any] = output.images
A_ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
A_ : str = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
A_ : Any = image[0, -3:, -3:, -1]
A_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Optional[int] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : str ):
'''simple docstring'''
A_ : str = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
A_ : List[str] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
A_ : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
A_ : List[str] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.dummy_cond_unet
A_ : Tuple = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
A_ : Dict = self.dummy_vae
A_ : List[Any] = self.dummy_text_encoder
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
A_ : Dict = unet.half()
A_ : Optional[int] = vae.half()
A_ : Optional[Any] = bert.half()
# make sure here that pndm scheduler skips prk
A_ : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
A_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : str = '''A painting of a squirrel eating a burger'''
A_ : Dict = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
A_ : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
A_ : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : int = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
A_ : Union[str, Any] = 4003660346
A_ : Optional[int] = 7
# without safety guidance (sld_guidance_scale = 0)
A_ : Any = torch.manual_seed(lowerCamelCase_ )
A_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
A_ : Union[str, Any] = output.images
A_ : List[str] = image[0, -3:, -3:, -1]
A_ : Any = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
A_ : str = torch.manual_seed(lowerCamelCase_ )
A_ : Union[str, Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
A_ : str = output.images
A_ : List[str] = image[0, -3:, -3:, -1]
A_ : Any = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
A_ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
A_ : Dict = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : int = '''padme amidala taking a bath artwork, safe for work, no nudity'''
A_ : List[Any] = 2734971755
A_ : str = 7
A_ : List[str] = torch.manual_seed(lowerCamelCase_ )
A_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
A_ : Optional[int] = output.images
A_ : Dict = image[0, -3:, -3:, -1]
A_ : List[str] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
A_ : Dict = torch.manual_seed(lowerCamelCase_ )
A_ : Union[str, Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
A_ : List[Any] = output.images
A_ : List[str] = image[0, -3:, -3:, -1]
A_ : int = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Any ):
'''simple docstring'''
A_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
A_ : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : Tuple = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
A_ : List[str] = 1044355234
A_ : Any = 12
A_ : int = torch.manual_seed(lowerCamelCase_ )
A_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
A_ : Any = output.images
A_ : List[str] = image[0, -3:, -3:, -1]
A_ : Any = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
A_ : int = torch.manual_seed(lowerCamelCase_ )
A_ : Union[str, Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
A_ : Union[str, Any] = output.images
A_ : Dict = image[0, -3:, -3:, -1]
A_ : str = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
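
# Hedged usage sketch (added for illustration): in the de-anonymized transformers API the three
# configuration classes above are OwlViTTextConfig, OwlViTVisionConfig and OwlViTConfig, and the
# classmethod that combines the two sub-config dicts is from_text_vision_configs. These names are
# assumptions about the upstream library, not definitions made in this file.
#   text_config = OwlViTTextConfig()
#   vision_config = OwlViTVisionConfig()
#   config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())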
| 27 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""")
def lowerCamelCase ( ):
A_ : Optional[Any] = 10
A_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""")),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""])),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string"""),
"""answer_start""": datasets.Value("""int32"""),
}),
"""id""": datasets.Value("""int64"""),
})
A_ : str = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(_A)),
} , features=_A , )
return dataset
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple):
A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""") / """file.arrow""")
dataset.map(cache_file_name=_A)
return filename
# FILE_CONTENT + files
__magic_name__ = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : Union[str, Any] = tmp_path_factory.mktemp("""data""") / """file.txt"""
A_ : Dict = FILE_CONTENT
with open(_A , """w""") as f:
f.write(_A)
return filename
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : str):
import bza
A_ : Dict = tmp_path_factory.mktemp("""data""") / """file.txt.bz2"""
A_ : Dict = bytes(_A , """utf-8""")
with bza.open(_A , """wb""") as f:
f.write(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : List[str]):
import gzip
A_ : Dict = str(tmp_path_factory.mktemp("""data""") / """file.txt.gz""")
A_ : List[str] = bytes(_A , """utf-8""")
with gzip.open(_A , """wb""") as f:
f.write(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : List[Any]):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
A_ : Any = tmp_path_factory.mktemp("""data""") / """file.txt.lz4"""
A_ : Tuple = bytes(_A , """utf-8""")
with lza.frame.open(_A , """wb""") as f:
f.write(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : List[Any]):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
A_ : Any = tmp_path_factory.mktemp("""data""") / """file.txt.7z"""
with pyazr.SevenZipFile(_A , """w""") as archive:
archive.write(_A , arcname=os.path.basename(_A))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : List[str]):
import tarfile
A_ : Optional[Any] = tmp_path_factory.mktemp("""data""") / """file.txt.tar"""
with tarfile.TarFile(_A , """w""") as f:
f.add(_A , arcname=os.path.basename(_A))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : List[str]):
import lzma
A_ : List[Any] = tmp_path_factory.mktemp("""data""") / """file.txt.xz"""
A_ : List[Any] = bytes(_A , """utf-8""")
with lzma.open(_A , """wb""") as f:
f.write(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[str]):
import zipfile
A_ : Optional[Any] = tmp_path_factory.mktemp("""data""") / """file.txt.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.basename(_A))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Optional[int]):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
A_ : List[str] = tmp_path_factory.mktemp("""data""") / """file.txt.zst"""
A_ : Tuple = bytes(_A , """utf-8""")
with zstd.open(_A , """wb""") as f:
f.write(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : int):
A_ : Dict = tmp_path_factory.mktemp("""data""") / """file.xml"""
A_ : int = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""")
with open(_A , """w""") as f:
f.write(_A)
return filename
__magic_name__ = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__magic_name__ = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__magic_name__ = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__magic_name__ = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__magic_name__ = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""")
def lowerCamelCase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : List[Any]):
A_ : str = datasets.Dataset.from_dict(_A)
A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.arrow""")
dataset.map(cache_file_name=_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Any = str(tmp_path_factory.mktemp("""data""") / """dataset.sqlite""")
with contextlib.closing(sqlitea.connect(_A)) as con:
A_ : Union[str, Any] = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""")
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values()))
con.commit()
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Tuple = str(tmp_path_factory.mktemp("""data""") / """dataset.csv""")
with open(_A , """w""" , newline="""""") as f:
A_ : List[Any] = csv.DictWriter(_A , fieldnames=["""col_1""", """col_2""", """col_3"""])
writer.writeheader()
for item in DATA:
writer.writerow(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : List[Any]):
A_ : Optional[int] = str(tmp_path_factory.mktemp("""data""") / """dataset2.csv""")
with open(_A , """w""" , newline="""""") as f:
A_ : Dict = csv.DictWriter(_A , fieldnames=["""col_1""", """col_2""", """col_3"""])
writer.writeheader()
for item in DATA:
writer.writerow(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : List[Any]):
    import bz2
A_ : Optional[Any] = tmp_path_factory.mktemp("""data""") / """dataset.csv.bz2"""
with open(_A , """rb""") as f:
A_ : str = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(_A , """wb""") as f:
f.write(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int):
A_ : Union[str, Any] = tmp_path_factory.mktemp("""data""") / """dataset.csv.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.basename(_A))
f.write(_A , arcname=os.path.basename(_A))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]):
A_ : Tuple = tmp_path_factory.mktemp("""data""") / """dataset.csv.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""")))
f.write(_A , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""")))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple):
A_ : int = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A)))
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A)))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : int = str(tmp_path_factory.mktemp("""data""") / """dataset.parquet""")
    A_ : Any = pa.schema(
        {
            """col_1""": pa.string(),
            """col_2""": pa.int64(),
            """col_3""": pa.float64(),
        })
with open(_A , """wb""") as f:
A_ : Optional[Any] = pq.ParquetWriter(_A , schema=_A)
A_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_A))] for k in DATA[0]} , schema=_A)
writer.write_table(_A)
writer.close()
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
A_ : int = str(tmp_path_factory.mktemp("""data""") / """dataset.json""")
A_ : Union[str, Any] = {"""data""": DATA}
with open(_A , """w""") as f:
json.dump(_A , _A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : int):
A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.json""")
A_ : List[str] = {"""data""": DATA_DICT_OF_LISTS}
with open(_A , """w""") as f:
json.dump(_A , _A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Any):
A_ : Optional[int] = str(tmp_path_factory.mktemp("""data""") / """dataset.jsonl""")
with open(_A , """w""") as f:
for item in DATA:
f.write(json.dumps(_A) + """\n""")
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : List[Any]):
A_ : Dict = str(tmp_path_factory.mktemp("""data""") / """dataset2.jsonl""")
with open(_A , """w""") as f:
for item in DATA:
f.write(json.dumps(_A) + """\n""")
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Any):
A_ : Any = str(tmp_path_factory.mktemp("""data""") / """dataset_312.jsonl""")
with open(_A , """w""") as f:
for item in DATA_312:
f.write(json.dumps(_A) + """\n""")
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Any):
A_ : List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset-str.jsonl""")
with open(_A , """w""") as f:
for item in DATA_STR:
f.write(json.dumps(_A) + """\n""")
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any]):
import gzip
A_ : Dict = str(tmp_path_factory.mktemp("""data""") / """dataset.txt.gz""")
with open(_A , """rb""") as orig_file:
with gzip.open(_A , """wb""") as zipped_file:
zipped_file.writelines(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]):
import gzip
A_ : List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.jsonl.gz""")
with open(_A , """rb""") as orig_file:
with gzip.open(_A , """wb""") as zipped_file:
zipped_file.writelines(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int]):
A_ : List[Any] = tmp_path_factory.mktemp("""data""") / """dataset.jsonl.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.basename(_A))
f.write(_A , arcname=os.path.basename(_A))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Any):
A_ : Optional[int] = tmp_path_factory.mktemp("""data""") / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.join("""nested""" , os.path.basename(_A)))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]):
A_ : Optional[Any] = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A)))
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A)))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]):
A_ : int = tmp_path_factory.mktemp("""data""") / """dataset.jsonl.tar"""
with tarfile.TarFile(_A , """w""") as f:
f.add(_A , arcname=os.path.basename(_A))
f.add(_A , arcname=os.path.basename(_A))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[Any]):
A_ : int = tmp_path_factory.mktemp("""data""") / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(_A , """w""") as f:
f.add(_A , arcname=os.path.join("""nested""" , os.path.basename(_A)))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : List[str] = ["""0""", """1""", """2""", """3"""]
A_ : str = str(tmp_path_factory.mktemp("""data""") / """dataset.txt""")
with open(_A , """w""") as f:
for item in data:
f.write(item + """\n""")
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : int):
A_ : List[Any] = ["""0""", """1""", """2""", """3"""]
A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""") / """dataset2.txt""")
with open(_A , """w""") as f:
for item in data:
f.write(item + """\n""")
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : Union[str, Any] = ["""0""", """1""", """2""", """3"""]
A_ : Dict = tmp_path_factory.mktemp("""data""") / """dataset.abc"""
with open(_A , """w""") as f:
for item in data:
f.write(item + """\n""")
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[Any]):
A_ : int = tmp_path_factory.mktemp("""data""") / """dataset.text.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.basename(_A))
f.write(_A , arcname=os.path.basename(_A))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any]):
A_ : Dict = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A)))
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A)))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict):
A_ : Optional[Any] = tmp_path_factory.mktemp("""data""") / """dataset.ext.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.basename("""unsupported.ext"""))
f.write(_A , arcname=os.path.basename("""unsupported_2.ext"""))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : int):
A_ : Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""])
A_ : List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset_with_unicode_new_lines.txt""")
with open(_A , """w""" , encoding="""utf-8""") as f:
f.write(_A)
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""")
@pytest.fixture(scope="""session""")
def lowerCamelCase ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""")
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict):
A_ : int = tmp_path_factory.mktemp("""data""") / """dataset.img.zip"""
with zipfile.ZipFile(_A , """w""") as f:
f.write(_A , arcname=os.path.basename(_A))
f.write(_A , arcname=os.path.basename(_A).replace(""".jpg""" , """2.jpg"""))
return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Dict = tmp_path_factory.mktemp("""data_dir""")
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""") as f:
f.write("""foo\n""" * 10)
with open(data_dir / """subdir""" / """test.txt""" , """w""") as f:
f.write("""bar\n""" * 10)
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""") as f:
f.write("""bar\n""" * 10)
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""") as f:
f.write("""foo\n""" * 10)
with open(data_dir / """.subdir""" / """test.txt""" , """w""") as f:
f.write("""bar\n""" * 10)
return data_dir
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
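        # Two mel filter banks are prepared below: an HTK-scale bank (used by the "fusion"
        # truncation path) and a Slaney-normalized bank (used by the "rand_trunc" path).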
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
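        # Fusion strategy: the valid chunk start positions are split into three equal ranges
        # (front / middle / back); one chunk is sampled from each range and stacked together
        # with a bilinearly shrunk copy of the full mel spectrogram, giving 4 stacked "views".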
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
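        # "repeat" tiles the waveform and truncates it to max_length, while "repeatpad" tiles it
        # a whole number of times and zero-pads the remainder up to max_length.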
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
| 27 | 0 |
'''simple docstring'''
def catalan_numbers ( upper_limit : int):
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)), for j = 0 .. i-1
    for i in range(2 , upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
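# Illustrative check of the recurrence above:
# catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]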
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
__magic_name__ = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
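                # OpenAIGPTDoubleHeadsModel expects multiple-choice shaped inputs, so dummy
                # label tensors of shape (batch, num_choices, seq_length) and (batch, num_choices)
                # are built here, plus a per-example label vector of length batch_size.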
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Dict=7 ,_a : Dict=3 ,_a : Optional[int]=18 ,_a : List[str]=30 ,_a : Union[str, Any]=400 ,_a : Optional[Any]=True ,_a : List[str]=None ,_a : Any=True ,):
'''simple docstring'''
A_ : Optional[int] = size if size is not None else {"""height""": 18, """width""": 18}
A_ : str = parent
A_ : List[str] = batch_size
A_ : str = num_channels
A_ : Dict = image_size
A_ : Dict = min_resolution
A_ : str = max_resolution
A_ : Dict = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = apply_ocr
def _a ( self : Dict ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
a_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self : int ):
'''simple docstring'''
A_ : Any = LayoutLMvaImageProcessingTester(self )
@property
def _a ( self : Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase ,"""do_resize""" ) )
self.assertTrue(hasattr(_lowercase ,"""size""" ) )
self.assertTrue(hasattr(_lowercase ,"""apply_ocr""" ) )
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
A_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _a ( self : int ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase ,Image.Image )
# Test not batched input
A_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
self.assertIsInstance(encoding.words ,_lowercase )
self.assertIsInstance(encoding.boxes ,_lowercase )
# Test batched
A_ : Union[str, Any] = image_processing(_lowercase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase ,numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase ,np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
A_ : Union[str, Any] = image_processing(_lowercase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : Dict ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase ,torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase ,torch.Tensor )
# Test not batched input
A_ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
A_ : Dict = image_processing(_lowercase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ : Dict = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" )
A_ : Any = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
A_ : List[str] = image_processing(_lowercase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
A_ : int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_lowercase )
self.assertListEqual(encoding.boxes ,_lowercase )
# with apply_OCR = False
A_ : int = LayoutLMvaImageProcessor(apply_ocr=_lowercase )
A_ : int = image_processing(_lowercase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 721 |
'''simple docstring'''
import base64
def lowerCamelCase ( lowerCamelCase : str):
    return base64.a85encode(lowerCamelCase.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
    return base64.a85decode(lowerCamelCase).decode("""utf-8""")
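# Note: the two helpers above wrap the standard-library Ascii85 codec (base64.a85encode /
# base64.a85decode); encoding a UTF-8 string and then decoding the result round-trips the text.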
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 0 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance ( emb_a , emb_b , eps : float = 1E-12):
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1) , a_min=eps)).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1) , a_min=eps)).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T)
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
a_ = 42
a_ = jnp.floataa
def _a ( self : Tuple ):
'''simple docstring'''
A_ : int = FlaxCLIPVisionModule(self.config.vision_config )
A_ : Optional[Any] = nn.Dense(self.config.projection_dim ,use_bias=A_ ,dtype=self.dtype )
A_ : Tuple = self.param("""concept_embeds""" ,jax.nn.initializers.ones ,(17, self.config.projection_dim) )
A_ : Union[str, Any] = self.param(
"""special_care_embeds""" ,jax.nn.initializers.ones ,(3, self.config.projection_dim) )
A_ : Optional[Any] = self.param("""concept_embeds_weights""" ,jax.nn.initializers.ones ,(17,) )
A_ : Tuple = self.param("""special_care_embeds_weights""" ,jax.nn.initializers.ones ,(3,) )
def __call__( self : Optional[int] ,_a : str ):
'''simple docstring'''
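        # Encode the image with the CLIP vision tower, project it, then compare the projected
        # embedding against the precomputed "special care" and NSFW concept embeddings via
        # cosine similarity; scores above the (adjusted) per-concept thresholds flag the image.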
A_ : Tuple = self.vision_model(A_ )[1]
A_ : str = self.visual_projection(A_ )
A_ : List[str] = jax_cosine_distance(A_ ,self.special_care_embeds )
A_ : Union[str, Any] = jax_cosine_distance(A_ ,self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
A_ : str = 0.0
A_ : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
A_ : Optional[int] = jnp.round(A_ ,3 )
A_ : Dict = jnp.any(special_scores > 0 ,axis=1 ,keepdims=A_ )
# Use a lower threshold if an image has any special care concept
A_ : int = is_special_care * 0.01
A_ : Optional[int] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
A_ : Optional[int] = jnp.round(A_ ,3 )
A_ : Optional[Any] = jnp.any(concept_scores > 0 ,axis=1 )
return has_nsfw_concepts
class __lowerCAmelCase ( _lowercase ):
'''simple docstring'''
a_ = CLIPConfig
a_ = '''clip_input'''
a_ = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] ,_a : CLIPConfig ,_a : Optional[Tuple] = None ,_a : int = 0 ,_a : jnp.dtype = jnp.floataa ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
if input_shape is None:
A_ : List[Any] = (1, 224, 224, 3)
A_ : Any = self.module_class(config=A_ ,dtype=A_ ,**A_ )
super().__init__(A_ ,A_ ,input_shape=A_ ,seed=A_ ,dtype=A_ ,_do_init=_do_init )
def _a ( self : str ,_a : jax.random.KeyArray ,_a : Tuple ,_a : FrozenDict = None ):
'''simple docstring'''
A_ : List[str] = jax.random.normal(A_ ,A_ )
A_ , A_ : Optional[int] = jax.random.split(A_ )
A_ : Any = {"""params""": params_rng, """dropout""": dropout_rng}
A_ : Optional[int] = self.module.init(A_ ,A_ )["""params"""]
return random_params
def __call__( self : str ,_a : Optional[Any] ,_a : dict = None ,):
'''simple docstring'''
A_ : Optional[Any] = jnp.transpose(A_ ,(0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params} ,jnp.array(A_ ,dtype=jnp.floataa ) ,rngs={} ,)
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
# word like '180' or '身高' or '神'
for char in word:
A_ : Optional[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase)
if chinese_word:
word_set.add(lowerCamelCase)
A_ : Any = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
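    # Greedy longest-match against the LTP word segmentation: when a span of BERT sub-tokens
    # matches a segmented Chinese word, every token after the first is prefixed with "##" so
    # that whole-word masking can later treat the word as a single unit.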
if not chinese_word_set:
return bert_tokens
A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class __lowerCAmelCase ( lowercase__ ):
'''simple docstring'''
a_ = 'bert-generation'
def __init__( self : Any ,_a : Union[str, Any]=50358 ,_a : List[Any]=1024 ,_a : Dict=24 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=4096 ,_a : Dict="gelu" ,_a : Union[str, Any]=0.1 ,_a : int=0.1 ,_a : Any=512 ,_a : Optional[Any]=0.02 ,_a : List[str]=1e-12 ,_a : str=0 ,_a : int=2 ,_a : str=1 ,_a : int="absolute" ,_a : Any=True ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : str = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Any = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[str] = hidden_act
A_ : Dict = intermediate_size
A_ : Dict = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : str = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = position_embedding_type
A_ : Any = use_cache
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
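        # Tokenize the text with the wrapped tokenizer, then run the image processor and merge
        # its pixel_values / pixel_mask into the same BatchEncoding that is returned.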
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
| 27 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__magic_name__ = logging.get_logger(__name__)
# General docstring
__magic_name__ = 'ResNetConfig'
# Base docstring
__magic_name__ = 'microsoft/resnet-50'
__magic_name__ = [1, 2_048, 7, 7]
# Image classification docstring
__magic_name__ = 'microsoft/resnet-50'
__magic_name__ = 'tiger cat'
__magic_name__ = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,_a : int ,_a : str ,_a : List[Any] = 3 ,_a : Optional[Any] = 1 ,_a : List[Any] = "relu" ):
'''simple docstring'''
super().__init__()
A_ : Optional[Any] = nn.Convad(
_a ,_a ,kernel_size=_a ,stride=_a ,padding=kernel_size // 2 ,bias=_a )
A_ : List[Any] = nn.BatchNormad(_a )
A_ : int = ACTaFN[activation] if activation is not None else nn.Identity()
def _a ( self : List[Any] ,_a : List[str] ):
'''simple docstring'''
A_ : Optional[int] = self.convolution(_a )
A_ : List[Any] = self.normalization(_a )
A_ : Optional[int] = self.activation(_a )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any ,_a : Tuple ):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = ResNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act )
A_ : int = nn.MaxPoolad(kernel_size=3 ,stride=2 ,padding=1 )
A_ : Dict = config.num_channels
def _a ( self : Optional[Any] ,_a : Any ):
'''simple docstring'''
A_ : Any = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
A_ : Any = self.embedder(_a )
A_ : str = self.pooler(_a )
return embedding
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : List[Any] ,_a : Any ,_a : Optional[int] = 2 ):
'''simple docstring'''
super().__init__()
A_ : List[Any] = nn.Convad(_a ,_a ,kernel_size=1 ,stride=_a ,bias=_a )
A_ : Optional[int] = nn.BatchNormad(_a )
def _a ( self : List[str] ,_a : int ):
'''simple docstring'''
A_ : Any = self.convolution(_a )
A_ : Union[str, Any] = self.normalization(_a )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : List[str] ,_a : Any ,_a : Dict = 1 ,_a : Optional[Any] = "relu" ):
'''simple docstring'''
super().__init__()
A_ : Dict = in_channels != out_channels or stride != 1
A_ : Union[str, Any] = (
ResNetShortCut(_a ,_a ,stride=_a ) if should_apply_shortcut else nn.Identity()
)
A_ : Tuple = nn.Sequential(
ResNetConvLayer(_a ,_a ,stride=_a ) ,ResNetConvLayer(_a ,_a ,activation=_a ) ,)
A_ : int = ACTaFN[activation]
def _a ( self : List[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = hidden_state
A_ : Optional[int] = self.layer(_a )
A_ : Any = self.shortcut(_a )
hidden_state += residual
A_ : Tuple = self.activation(_a )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : List[str] ,_a : int ,_a : Any = 1 ,_a : Union[str, Any] = "relu" ,_a : str = 4 ):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = in_channels != out_channels or stride != 1
A_ : List[Any] = out_channels // reduction
A_ : Dict = (
ResNetShortCut(_a ,_a ,stride=_a ) if should_apply_shortcut else nn.Identity()
)
A_ : Dict = nn.Sequential(
ResNetConvLayer(_a ,_a ,kernel_size=1 ) ,ResNetConvLayer(_a ,_a ,stride=_a ) ,ResNetConvLayer(_a ,_a ,kernel_size=1 ,activation=_a ) ,)
A_ : Union[str, Any] = ACTaFN[activation]
def _a ( self : List[Any] ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Any = hidden_state
A_ : List[str] = self.layer(_a )
A_ : Any = self.shortcut(_a )
hidden_state += residual
A_ : int = self.activation(_a )
return hidden_state
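# Standalone sketch of the residual pattern that the two layer classes above implement
# (hypothetical toy module, not the checkpoint architecture): the output is
# activation(F(x) + shortcut(x)), where the shortcut is the identity when shapes already match.
import torch
from torch import nn
class TinyResidualBlock(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.body(x) + x)  # identity shortcut keeps gradients flowing
print(TinyResidualBlock(8)(torch.randn(1, 8, 16, 16)).shape)  # torch.Size([1, 8, 16, 16])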
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_a : str ,_a : str ,_a : int ,_a : Optional[Any] = 2 ,_a : List[Any] = 2 ,):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
A_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_a ,_a ,stride=_a ,activation=config.hidden_act ) ,*[layer(_a ,_a ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,)
def _a ( self : Optional[int] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = input
for layer in self.layers:
A_ : str = layer(_a )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_a : str ):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_a ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
A_ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_a ,config.depths[1:] ):
self.stages.append(ResNetStage(_a ,_a ,_a ,depth=_a ) )
def _a ( self : Tuple ,_a : Optional[Any] ,_a : Optional[Any] = False ,_a : Tuple = True ):
'''simple docstring'''
A_ : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Optional[Any] = hidden_states + (hidden_state,)
A_ : Tuple = stage_module(_a )
if output_hidden_states:
A_ : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_a ,hidden_states=_a ,)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ResNetConfig
a_ = """resnet"""
a_ = """pixel_values"""
a_ = True
def _a ( self : List[Any] ,_a : int ):
'''simple docstring'''
if isinstance(_a ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode="""fan_out""" ,nonlinearity="""relu""" )
elif isinstance(_a ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def _a ( self : str ,_a : List[Any] ,_a : Tuple=False ):
'''simple docstring'''
if isinstance(_a ,_a ):
A_ : Tuple = value
__magic_name__ = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__magic_name__ = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] ,_a : Dict ):
'''simple docstring'''
super().__init__(_a )
A_ : str = config
A_ : Optional[int] = ResNetEmbeddings(_a )
A_ : Union[str, Any] = ResNetEncoder(_a )
A_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def _a ( self : str ,_a : str ,_a : str = None ,_a : Optional[int] = None ):
'''simple docstring'''
A_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.embedder(_a )
A_ : List[Any] = self.encoder(
_a ,output_hidden_states=_a ,return_dict=_a )
A_ : str = encoder_outputs[0]
A_ : Optional[Any] = self.pooler(_a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a ,pooler_output=_a ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
super().__init__(_a )
A_ : Dict = config.num_labels
A_ : List[Any] = ResNetModel(_a )
# classification head
A_ : Optional[Any] = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def _a ( self : Union[str, Any] ,_a : Union[str, Any] = None ,_a : Optional[int] = None ,_a : List[str] = None ,_a : str = None ,):
'''simple docstring'''
A_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[str] = self.resnet(_a ,output_hidden_states=_a ,return_dict=_a )
A_ : List[str] = outputs.pooler_output if return_dict else outputs[1]
A_ : str = self.classifier(_a )
A_ : int = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : Optional[int] = """single_label_classification"""
else:
A_ : Tuple = """multi_label_classification"""
if self.config.problem_type == "regression":
A_ : Optional[int] = MSELoss()
if self.num_labels == 1:
A_ : List[str] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
A_ : Tuple = loss_fct(_a ,_a )
elif self.config.problem_type == "single_label_classification":
A_ : Tuple = CrossEntropyLoss()
A_ : Tuple = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : List[Any] = BCEWithLogitsLoss()
A_ : Optional[Any] = loss_fct(_a ,_a )
if not return_dict:
A_ : List[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a ,logits=_a ,hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
super().__init__(_a )
super()._init_backbone(_a )
A_ : List[str] = [config.embedding_size] + config.hidden_sizes
A_ : Tuple = ResNetEmbeddings(_a )
A_ : Optional[Any] = ResNetEncoder(_a )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@replace_return_docstrings(output_type=_a ,config_class=_CONFIG_FOR_DOC )
def _a ( self : str ,_a : Any ,_a : List[Any] = None ,_a : Optional[Any] = None ):
'''simple docstring'''
A_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : List[Any] = self.embedder(_a )
A_ : List[str] = self.encoder(_a ,output_hidden_states=_a ,return_dict=_a )
A_ : Optional[int] = outputs.hidden_states
A_ : int = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
A_ : Tuple = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_a ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=_a ,)
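# Toy illustration (hypothetical names) of the backbone's feature-map selection loop above:
# only hidden states whose stage name appears in `out_features` are returned.
stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
out_features = ["stage2", "stage4"]
hidden_states = [f"feat_{name}" for name in stage_names]
feature_maps = tuple(h for name, h in zip(stage_names, hidden_states) if name in out_features)
print(feature_maps)  # ('feat_stage2', 'feat_stage4')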
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 | 0 |
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : bool = False):
if not isinstance(snake_case_ , snake_case_):
A_ : List[str] = F'Expected string as input, found {type(snake_case_)}'
raise ValueError(snake_case_)
if not isinstance(snake_case_ , snake_case_):
A_ : Tuple = F'Expected boolean as use_pascal parameter, found {type(snake_case_)}'
raise ValueError(snake_case_)
A_ : Tuple = input_str.split("""_""")
A_ : str = 0 if use_pascal else 1
A_ : Optional[int] = words[start_index:]
A_ : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
A_ : List[str] = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
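# Quick sanity check of the conversion rule implemented above, rewritten as a standalone
# helper for illustration (the helper name `to_camel` is hypothetical):
def to_camel(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized = [w[0].upper() + w[1:] for w in words[start_index:]]
    initial = "" if use_pascal else words[0]
    return "".join([initial, *capitalized])
print(to_camel("some_random_string"))                   # someRandomString
print(to_camel("some_random_string", use_pascal=True))  # SomeRandomString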
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
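# Minimal sketch of the column-trimming idea above (hypothetical toy tensors): columns that
# contain only the pad token in every row are dropped to shorten the batch.
import torch
pad_token_id = 0
input_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
print(input_ids[:, keep_column_mask])  # tensor([[5, 6], [7, 0]])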
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCamelCase ( lowerCamelCase : Any):
A_ : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
A_ : str = emb.weight.shape
A_ : Optional[Any] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
A_ : List[str] = emb.weight.data
return lin_layer
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str]=None):
A_ : Optional[Any] = {}
for old_key in state_dict.keys():
A_ : Optional[Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
A_ : Dict = key.replace("""moe_layer.experts.0""" , F'ffn.experts.expert_{expert_idx}')
else:
A_ : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""")
if "gate" in key:
A_ : Union[str, Any] = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""")
if "fc2" and "experts" not in key:
A_ : List[str] = key.replace(""".fc2.""" , """.ffn.fc2.""")
if "fc1" and "experts" not in key:
A_ : int = key.replace(""".fc1.""" , """.ffn.fc1.""")
if ".encoder_attn." in key:
A_ : List[Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""")
if "encoder_attn_layer_norm" in key:
A_ : Tuple = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""")
if "final_layer_norm" in key:
A_ : List[str] = key.replace("""final_layer_norm""" , """ff_layer_norm""")
A_ : Dict = state_dict[old_key]
return new_dict
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str = WEIGHTS_NAME):
A_ : Any = []
A_ : str = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
for expert in range(_lowerCamelCase):
A_ : Dict = switch_checkpoint_path + F'-rank-{expert}.pt'
if os.path.isfile(_lowerCamelCase):
A_ : Optional[int] = torch.load(_lowerCamelCase)["model"]
remove_ignore_keys_(_lowerCamelCase)
A_ : Union[str, Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase)
A_ : Tuple = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'-{len(_lowerCamelCase)+1:05d}-of-???.bin'))
torch.save(_lowerCamelCase , _lowerCamelCase)
sharded_state_dicts.append(expert_state.keys())
total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
expert_state[list(_lowerCamelCase)[0]].dtype)
# Add the last block
A_ : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'-{len(_lowerCamelCase)+1:05d}-of-???.bin'))
A_ : List[Any] = torch.load(switch_checkpoint_path + """-shared.pt""")["model"]
remove_ignore_keys_(_lowerCamelCase)
A_ : Dict = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase)
A_ : Any = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys())
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase) == 1:
A_ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase)
torch.save(_lowerCamelCase , _lowerCamelCase)
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase)
# Otherwise, let's build the index
A_ : Union[str, Any] = {}
for idx, shard in enumerate(_lowerCamelCase):
A_ : Optional[Any] = weights_name.replace(""".bin""" , F'-{idx+1:05d}-of-{len(_lowerCamelCase):05d}.bin')
A_ : Any = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'-{idx+1:05d}-of-???.bin'))
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase))
for key in shard:
A_ : Dict = shard_file
# Add the metadata
A_ : List[str] = {"total_size": total_size}
A_ : Union[str, Any] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase) , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase) + "\n"
f.write(_lowerCamelCase)
return metadata, index
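# Hedged illustration (hypothetical keys and sizes) of the sharded-checkpoint index that the
# function above returns as `(metadata, index)` and that is written next to the shard files.
import json
index = {
    "metadata": {"total_size": 123456},
    "weight_map": {
        "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "shared.weight": "pytorch_model-00002-of-00002.bin",
    },
}
print(json.dumps(index, indent=2, sort_keys=True))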
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__magic_name__ = parser.parse_args()
__magic_name__ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__magic_name__ = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__magic_name__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
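# Standalone sketch of the optional-dependency guard pattern used above (package and symbol
# names here are hypothetical): a symbol is only exported when its backend can be imported.
import importlib.util
def backend_available(package_name: str) -> bool:
    return importlib.util.find_spec(package_name) is not None
exported_symbols = []
if backend_available("sentencepiece"):
    exported_symbols.append("NllbTokenizer")
if backend_available("tokenizers"):
    exported_symbols.append("NllbTokenizerFast")
print(exported_symbols)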
| 27 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__magic_name__ = 'docs/source/en/_toctree.yml'
def lowerCamelCase ( lowerCamelCase : Any):
A_ : Any = defaultdict(snake_case__)
for doc in model_doc:
counts[doc["local"]] += 1
A_ : Any = [key for key, value in counts.items() if value > 1]
A_ : Optional[Any] = []
for duplicate_key in duplicates:
A_ : Dict = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key})
if len(snake_case__) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""")
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]})
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1])
# Sort
return sorted(snake_case__ , key=lambda lowerCamelCase: s["title"].lower())
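# Toy run of the duplicate-detection step above (hypothetical toc entries): entries sharing a
# "local" target are counted, and any key seen more than once is flagged for merging.
from collections import defaultdict
model_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/gpt2", "title": "GPT-2"},
]
counts = defaultdict(int)
for doc in model_doc:
    counts[doc["local"]] += 1
print([key for key, value in counts.items() if value > 1])  # ['model_doc/bert']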
def lowerCamelCase ( lowerCamelCase : Optional[int]=False):
with open(snake_case__ , encoding="""utf-8""") as f:
A_ : Union[str, Any] = yaml.safe_load(f.read())
# Get to the API doc
A_ : int = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A_ : List[Any] = content[api_idx]["""sections"""]
# Then to the model doc
A_ : Optional[int] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
A_ : List[str] = api_doc[model_idx]["""sections"""]
A_ : List[str] = [(idx, section) for idx, section in enumerate(snake_case__) if """sections""" in section]
A_ : Optional[Any] = False
for idx, modality_doc in modalities_docs:
A_ : int = modality_doc["""sections"""]
A_ : Tuple = clean_model_doc_toc(snake_case__)
if old_modality_doc != new_modality_doc:
A_ : Optional[Any] = True
if overwrite:
A_ : Optional[int] = new_modality_doc
if diff:
if overwrite:
A_ : Optional[int] = model_doc
A_ : Union[str, Any] = api_doc
with open(snake_case__ , """w""" , encoding="""utf-8""") as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__))
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__magic_name__ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
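# Reproducibility sketch for the seeded `torch.Generator` pattern used in the dummy-input
# helper above: two generators seeded identically produce identical tensors, which is what
# makes the slice comparison in the test deterministic.
import torch
g1 = torch.Generator().manual_seed(0)
g2 = torch.Generator().manual_seed(0)
print(torch.equal(torch.randn(2, 2, generator=g1), torch.randn(2, 2, generator=g2)))  # True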
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __get__( self : str ,_a : Any ,_a : Any=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
A_ : Dict = """__cached_""" + self.fget.__name__
A_ : int = getattr(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
if cached is None:
A_ : List[Any] = self.fget(__lowerCAmelCase )
setattr(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
return cached
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Tuple = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'invalid truth value {val!r}')
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
if is_torch_fx_proxy(__snake_case):
return True
if is_torch_available():
import torch
if isinstance(__snake_case , torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__snake_case , tf.Tensor):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__snake_case , (jnp.ndarray, Tracer)):
return True
return isinstance(__snake_case , np.ndarray)
def lowerCamelCase ( lowerCamelCase : str):
return isinstance(__snake_case , np.ndarray)
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return _is_numpy(__snake_case)
def lowerCamelCase ( lowerCamelCase : Any):
import torch
return isinstance(__snake_case , torch.Tensor)
def lowerCamelCase ( lowerCamelCase : List[Any]):
return False if not is_torch_available() else _is_torch(__snake_case)
def lowerCamelCase ( lowerCamelCase : Any):
import torch
return isinstance(__snake_case , torch.device)
def lowerCamelCase ( lowerCamelCase : Tuple):
return False if not is_torch_available() else _is_torch_device(__snake_case)
def lowerCamelCase ( lowerCamelCase : Tuple):
import torch
if isinstance(__snake_case , __snake_case):
if hasattr(__snake_case , __snake_case):
A_ : Optional[int] = getattr(__snake_case , __snake_case)
else:
return False
return isinstance(__snake_case , torch.dtype)
def lowerCamelCase ( lowerCamelCase : Tuple):
return False if not is_torch_available() else _is_torch_dtype(__snake_case)
def lowerCamelCase ( lowerCamelCase : List[str]):
import tensorflow as tf
return isinstance(__snake_case , tf.Tensor)
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return False if not is_tf_available() else _is_tensorflow(__snake_case)
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__snake_case , """is_symbolic_tensor"""):
return tf.is_symbolic_tensor(__snake_case)
return type(__snake_case) == tf.Tensor
def lowerCamelCase ( lowerCamelCase : Tuple):
return False if not is_tf_available() else _is_tf_symbolic_tensor(__snake_case)
def lowerCamelCase ( lowerCamelCase : int):
import jax.numpy as jnp # noqa: F811
return isinstance(__snake_case , jnp.ndarray)
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
return False if not is_flax_available() else _is_jax(__snake_case)
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
if isinstance(__snake_case , (dict, UserDict)):
return {k: to_py_obj(__snake_case) for k, v in obj.items()}
elif isinstance(__snake_case , (list, tuple)):
return [to_py_obj(__snake_case) for o in obj]
elif is_tf_tensor(__snake_case):
return obj.numpy().tolist()
elif is_torch_tensor(__snake_case):
return obj.detach().cpu().tolist()
elif is_jax_tensor(__snake_case):
return np.asarray(__snake_case).tolist()
elif isinstance(__snake_case , (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
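# Example of the recursive to-Python-object conversion above, as a standalone sketch that
# only handles numpy (the helper name `to_py` is hypothetical):
import numpy as np
def to_py(obj):
    if isinstance(obj, dict):
        return {k: to_py(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [to_py(o) for o in obj]
    if isinstance(obj, (np.ndarray, np.number)):
        return obj.tolist()
    return obj
print(to_py({"a": np.arange(3), "b": [np.float32(1.5), 2]}))  # {'a': [0, 1, 2], 'b': [1.5, 2]}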
def lowerCamelCase ( lowerCamelCase : List[str]):
if isinstance(__snake_case , (dict, UserDict)):
return {k: to_numpy(__snake_case) for k, v in obj.items()}
elif isinstance(__snake_case , (list, tuple)):
return np.array(__snake_case)
elif is_tf_tensor(__snake_case):
return obj.numpy()
elif is_torch_tensor(__snake_case):
return obj.detach().cpu().numpy()
elif is_jax_tensor(__snake_case):
return np.asarray(__snake_case)
else:
return obj
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : Dict ):
'''simple docstring'''
A_ : int = fields(self )
# Safety and consistency checks
if not len(__lowerCAmelCase ):
raise ValueError(f'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'{self.__class__.__name__} should not have more than one required field.' )
A_ : Optional[int] = getattr(self ,class_fields[0].name )
A_ : List[str] = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__lowerCAmelCase ):
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
A_ : Dict = first_field.items()
A_ : List[str] = True
else:
try:
A_ : Tuple = iter(__lowerCAmelCase )
A_ : List[Any] = True
except TypeError:
A_ : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__lowerCAmelCase ):
if (
not isinstance(__lowerCAmelCase ,(list, tuple) )
or not len(__lowerCAmelCase ) == 2
or not isinstance(element[0] ,__lowerCAmelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
A_ : str = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self ,element[0] ,element[1] )
if element[1] is not None:
A_ : str = element[1]
elif first_field is not None:
A_ : Dict = first_field
else:
for field in class_fields:
A_ : Union[str, Any] = getattr(self ,field.name )
if v is not None:
A_ : Any = v
def __delitem__( self : Any ,*_a : Optional[int] ,**_a : Tuple ):
'''simple docstring'''
raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def _a ( self : List[str] ,*_a : Optional[int] ,**_a : Union[str, Any] ):
'''simple docstring'''
raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def _a ( self : Dict ,*_a : Union[str, Any] ,**_a : Any ):
'''simple docstring'''
raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def _a ( self : List[str] ,*_a : Dict ,**_a : Optional[Any] ):
'''simple docstring'''
raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__( self : Tuple ,_a : str ):
'''simple docstring'''
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
A_ : Optional[Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any] ,_a : List[str] ,_a : List[str] ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__lowerCAmelCase ,__lowerCAmelCase )
super().__setattr__(__lowerCAmelCase ,__lowerCAmelCase )
def __setitem__( self : int ,_a : List[str] ,_a : int ):
'''simple docstring'''
super().__setitem__(__lowerCAmelCase ,__lowerCAmelCase )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__lowerCAmelCase ,__lowerCAmelCase )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@classmethod
def _a ( cls : Optional[Any] ,_a : Dict ):
'''simple docstring'''
raise ValueError(
f'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """longest"""
a_ = """max_length"""
a_ = """do_not_pad"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """pt"""
a_ = """tf"""
a_ = """np"""
a_ = """jax"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : List[Any] ):
'''simple docstring'''
A_ : List[str] = context_managers
A_ : Dict = ExitStack()
def __enter__( self : Dict ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(__lowerCAmelCase )
def __exit__( self : Any ,*_a : List[Any] ,**_a : List[str] ):
'''simple docstring'''
self.stack.__exit__(*__lowerCAmelCase ,**__lowerCAmelCase )
def lowerCamelCase ( lowerCamelCase : Optional[int]):
A_ : str = infer_framework(__snake_case)
if framework == "tf":
A_ : Union[str, Any] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
A_ : Any = inspect.signature(model_class.forward) # PyTorch models
else:
A_ : Tuple = inspect.signature(model_class.__call__) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : Optional[Any] = model_class.__name__
A_ : List[str] = infer_framework(__snake_case)
if framework == "tf":
A_ : Optional[int] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
A_ : str = inspect.signature(model_class.forward) # PyTorch models
else:
A_ : List[str] = inspect.signature(model_class.__call__) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : str = "" , lowerCamelCase : Tuple = "."):
def _flatten_dict(lowerCamelCase : Dict , lowerCamelCase : List[Any]="" , lowerCamelCase : Dict="."):
for k, v in d.items():
A_ : Tuple = str(__snake_case) + delimiter + str(__snake_case) if parent_key else k
if v and isinstance(__snake_case , __snake_case):
yield from flatten_dict(__snake_case , __snake_case , delimiter=__snake_case).items()
else:
yield key, v
return dict(_flatten_dict(__snake_case , __snake_case , __snake_case))
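# Example of the nested-dict flattening above, as a standalone re-implementation for
# illustration (the helper name `flatten` is hypothetical):
def flatten(d, parent_key="", delimiter="."):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else k
        if isinstance(v, dict) and v:
            items.update(flatten(v, key, delimiter))
        else:
            items[key] = v
    return items
print(flatten({"a": {"b": 1, "c": {"d": 2}}, "e": 3}))  # {'a.b': 1, 'a.c.d': 2, 'e': 3}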
@contextmanager
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] = False):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Dict=None):
if is_numpy_array(__snake_case):
return np.transpose(__snake_case , axes=__snake_case)
elif is_torch_tensor(__snake_case):
return array.T if axes is None else array.permute(*__snake_case)
elif is_tf_tensor(__snake_case):
import tensorflow as tf
return tf.transpose(__snake_case , perm=__snake_case)
elif is_jax_tensor(__snake_case):
return jnp.transpose(__snake_case , axes=__snake_case)
else:
raise ValueError(F'Type not supported for transpose: {type(__snake_case)}.')
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Any):
if is_numpy_array(__snake_case):
return np.reshape(__snake_case , __snake_case)
elif is_torch_tensor(__snake_case):
return array.reshape(*__snake_case)
elif is_tf_tensor(__snake_case):
import tensorflow as tf
return tf.reshape(__snake_case , __snake_case)
elif is_jax_tensor(__snake_case):
return jnp.reshape(__snake_case , __snake_case)
else:
raise ValueError(F'Type not supported for reshape: {type(__snake_case)}.')
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : str=None):
if is_numpy_array(__snake_case):
return np.squeeze(__snake_case , axis=__snake_case)
elif is_torch_tensor(__snake_case):
return array.squeeze() if axis is None else array.squeeze(dim=__snake_case)
elif is_tf_tensor(__snake_case):
import tensorflow as tf
return tf.squeeze(__snake_case , axis=__snake_case)
elif is_jax_tensor(__snake_case):
return jnp.squeeze(__snake_case , axis=__snake_case)
else:
raise ValueError(F'Type not supported for squeeze: {type(__snake_case)}.')
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : int):
if is_numpy_array(__snake_case):
return np.expand_dims(__snake_case , __snake_case)
elif is_torch_tensor(__snake_case):
return array.unsqueeze(dim=__snake_case)
elif is_tf_tensor(__snake_case):
import tensorflow as tf
return tf.expand_dims(__snake_case , axis=__snake_case)
elif is_jax_tensor(__snake_case):
return jnp.expand_dims(__snake_case , axis=__snake_case)
else:
raise ValueError(F'Type not supported for expand_dims: {type(__snake_case)}.')
def lowerCamelCase ( lowerCamelCase : Tuple):
if is_numpy_array(__snake_case):
return np.size(__snake_case)
elif is_torch_tensor(__snake_case):
return array.numel()
elif is_tf_tensor(__snake_case):
import tensorflow as tf
return tf.size(__snake_case)
elif is_jax_tensor(__snake_case):
return array.size
else:
raise ValueError(F'Type not supported for expand_dims: {type(__snake_case)}.')
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]):
for key, value in auto_map.items():
if isinstance(__snake_case , (tuple, list)):
A_ : Optional[int] = [F'{repo_id}--{v}' if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
A_ : Tuple = F'{repo_id}--{value}'
return auto_map
def lowerCamelCase ( lowerCamelCase : Tuple):
for base_class in inspect.getmro(__snake_case):
A_ : str = base_class.__module__
A_ : Tuple = base_class.__name__
if module.startswith("""tensorflow""") or module.startswith("""keras""") or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""") or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""") or module.startswith("""jax""") or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'Could not infer framework from class {model_class}.')
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
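# Hedged usage sketch of the configuration class defined above (assumes the `transformers`
# library is installed; the values passed here are illustrative, not checkpoint defaults):
from transformers import DebertaV2Config
config = DebertaV2Config(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.model_type, config.hidden_size)  # deberta-v2 256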
| 27 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __lowerCAmelCase :
'''simple docstring'''
def _a ( self : Dict ,_a : Optional[Any] ,_a : Any ,_a : int ):
'''simple docstring'''
return None
class __lowerCAmelCase :
'''simple docstring'''
def _a ( self : int ,_a : Dict ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : List[Any] ):
'''simple docstring'''
return None
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ ,"""tf""" ,12 ,**lowercase_ )
@require_torch
@slow
def _a ( self : Tuple ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ ,"""pt""" ,12 ,**lowercase_ )
@require_torch
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
from transformers import BertModel
A_ : str = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(lowercase_ ) )
vocab_file.flush()
A_ : str = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
A_ : Any = BertModel(BertConfig(vocab_size=len(lowercase_ ) ) )
model.save_pretrained(lowercase_ )
self._test_export(lowercase_ ,"""pt""" ,12 ,lowercase_ )
@require_tf
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A_ : Optional[int] = self._test_export(lowercase_ ,"""tf""" ,12 ,**lowercase_ )
A_ : Dict = quantize(Path(lowercase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def _a ( self : Any ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A_ : Union[str, Any] = self._test_export(lowercase_ ,"""pt""" ,12 ,**lowercase_ )
A_ : List[Any] = quantize(lowercase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def _a ( self : Dict ,_a : List[str] ,_a : int ,_a : List[str] ,_a : List[str]=None ,**_a : Tuple ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
A_ : List[str] = Path(lowercase_ ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,**lowercase_ )
return path
except Exception as e:
self.fail(lowercase_ )
@require_torch
@require_tokenizers
@slow
def _a ( self : int ):
'''simple docstring'''
from transformers import BertModel
A_ : List[str] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
A_ : Any = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase_ ,lowercase_ ,"""pt""" )
@require_tf
@require_tokenizers
@slow
def _a ( self : Tuple ):
'''simple docstring'''
from transformers import TFBertModel
A_ : Any = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
A_ : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase_ ,lowercase_ ,"""tf""" )
def _a ( self : Any ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = FeatureExtractionPipeline(lowercase_ ,lowercase_ )
A_ : List[Any] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
A_ : Any = infer_shapes(lowercase_ ,lowercase_ )
# Assert all variables are present
self.assertEqual(len(lowercase_ ) ,len(lowercase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,lowercase_ )
self.assertSequenceEqual(variable_names[3:] ,lowercase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : int = ["""input_ids""", """attention_mask""", """token_type_ids"""]
A_ : List[Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
A_ : Optional[Any] = ensure_valid_input(FuncContiguousArgs() ,lowercase_ ,lowercase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowercase_ ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(lowercase_ ) ,set(lowercase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowercase_ ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
A_ : List[Any] = ensure_valid_input(FuncNonContiguousArgs() ,lowercase_ ,lowercase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowercase_ ) ,1 )
self.assertEqual(len(lowercase_ ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] ,"""input_ids""" )
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__magic_name__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__magic_name__ = BeautifulSoup(res.text, 'html.parser')
__magic_name__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
from ... import PretrainedConfig
__magic_name__ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
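
# Editor's note (hedged): the class above mirrors `transformers.NezhaConfig`. A typical
# usage sketch, assuming a transformers release that ships the Nezha model:
#
#   from transformers import NezhaConfig, NezhaModel
#   config = NezhaConfig(hidden_size=768, num_hidden_layers=12, max_relative_position=64)
#   model = NezhaModel(config)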
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def _a ( self : Tuple ):
'''simple docstring'''
A_ , A_ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,)
A_ : List[Any] = """A painting of a squirrel eating a burger"""
A_ : List[str] = jax.device_count()
A_ : str = num_samples * [prompt]
A_ : Any = sd_pipe.prepare_inputs(__lowerCAmelCase )
A_ : int = replicate(__lowerCAmelCase )
A_ : List[str] = shard(__lowerCAmelCase )
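        # (editor note) replicate() copies the pipeline parameters onto every local device,
        # while shard() splits the tokenized prompts along a new leading device axis; the
        # PRNG key is split per device below so each shard samples with a different seed.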
A_ : List[Any] = jax.random.PRNGKey(0 )
A_ : int = jax.random.split(__lowerCAmelCase ,jax.device_count() )
A_ : str = sd_pipe(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,num_inference_steps=25 ,jit=__lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A_ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ : Dict = images[0, 253:256, 253:256, -1]
A_ : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ : List[Any] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = """stabilityai/stable-diffusion-2"""
A_ , A_ : int = FlaxDPMSolverMultistepScheduler.from_pretrained(__lowerCAmelCase ,subfolder="""scheduler""" )
A_ , A_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
__lowerCAmelCase ,scheduler=__lowerCAmelCase ,revision="""bf16""" ,dtype=jnp.bfloataa ,)
A_ : Any = scheduler_params
A_ : Dict = """A painting of a squirrel eating a burger"""
A_ : str = jax.device_count()
A_ : Any = num_samples * [prompt]
A_ : Tuple = sd_pipe.prepare_inputs(__lowerCAmelCase )
A_ : List[Any] = replicate(__lowerCAmelCase )
A_ : Optional[Any] = shard(__lowerCAmelCase )
A_ : Optional[int] = jax.random.PRNGKey(0 )
A_ : str = jax.random.split(__lowerCAmelCase ,jax.device_count() )
A_ : Union[str, Any] = sd_pipe(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,num_inference_steps=25 ,jit=__lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A_ : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ : Optional[Any] = images[0, 253:256, 253:256, -1]
A_ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ : Any = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative DFS: return the set of vertices reachable from ``start``."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
__magic_name__ = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
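
# Quick sanity check (editor addition): the example graph above is connected, so a
# traversal from 'A' must visit every vertex.
if __name__ == "__main__":
    assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}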
'''simple docstring'''
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'
def is_spain_national_id(spanish_id: str) -> bool:
    """
    >>> is_spain_national_id("12345678Z")
    True
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
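
# Editor's examples: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so "12345678Z"
# (with or without the dash) is a well-formed test ID.
if __name__ == "__main__":
    assert is_spain_national_id("12345678Z")
    assert is_spain_national_id("12345678-Z")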
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
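
# Example invocation (editor addition; the script and checkpoint names below are
# placeholders, substitute the real file name of this script and a checkpoint downloaded
# from Apple's ml-cvnets release):
#
#   python convert_mobilevit.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small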
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
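# Editor's note (hedged): this processor matches the behaviour of
# `transformers.CLIPImageProcessor` (resize -> center crop -> rescale -> normalize), so an
# equivalent high-level call looks roughly like this:
#
#   from PIL import Image
#   from transformers import CLIPImageProcessor
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (320, 240)), return_tensors="pt")
#   print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])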
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = 'https://openaipublic.azureedge.net/jukebox/models/'
__magic_name__ = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def lowerCamelCase ( lowerCamelCase : Dict):
if key.endswith(""".model.1.bias""") and len(key.split(""".""")) > 10:
A_ : Dict = key.replace(""".model.1.bias""" , """.conv1d_1.bias""")
elif key.endswith(""".model.1.weight""") and len(key.split(""".""")) > 10:
A_ : List[str] = key.replace(""".model.1.weight""" , """.conv1d_1.weight""")
elif key.endswith(""".model.3.bias""") and len(key.split(""".""")) > 10:
A_ : int = key.replace(""".model.3.bias""" , """.conv1d_2.bias""")
elif key.endswith(""".model.3.weight""") and len(key.split(""".""")) > 10:
A_ : Union[str, Any] = key.replace(""".model.3.weight""" , """.conv1d_2.weight""")
if "conditioner_blocks.0." in key:
A_ : Any = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""")
if "prime_prior" in key:
A_ : Union[str, Any] = key.replace("""prime_prior""" , """encoder""")
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
A_ : Optional[int] = key.replace(""".emb.""" , """.""")
if key.endswith("""k"""): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""")
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""")
if "x_emb.emb." in key:
A_ : List[Any] = key.replace("""0.x_emb.emb""" , """embed_tokens""")
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""")
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""")
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""")
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""")
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""")
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""")
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""")
return key
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]):
A_ : Tuple = {}
import re
A_ : int = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""")
A_ : Dict = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""")
A_ : Optional[int] = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""")
A_ : List[Any] = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""")
A_ : Optional[Any] = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""")
A_ : Dict = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""")
A_ : str = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""")
A_ : Tuple = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""")
A_ : List[Any] = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""")
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__UpperCamelCase):
A_ : Union[str, Any] = re_encoder_block_conv_in.match(__UpperCamelCase)
A_ : int = regex_match.groups()
A_ : List[str] = int(groups[2]) * 2 + int(groups[3])
A_ : Dict = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
A_ : int = re_encoder_block_conv_in.sub(__UpperCamelCase , __UpperCamelCase)
elif re_encoder_block_resnet.fullmatch(__UpperCamelCase):
A_ : str = re_encoder_block_resnet.match(__UpperCamelCase)
A_ : Optional[Any] = regex_match.groups()
A_ : Union[str, Any] = int(groups[2]) * 2 + int(groups[3])
A_ : int = {"""1""": 1, """3""": 2}[groups[-2]]
A_ : Optional[int] = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
A_ : Optional[int] = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A_ : List[Any] = prefix + resnet_block
A_ : Dict = re_encoder_block_resnet.sub(__UpperCamelCase , __UpperCamelCase)
elif re_encoder_block_proj_out.fullmatch(__UpperCamelCase):
A_ : List[str] = re_encoder_block_proj_out.match(__UpperCamelCase)
A_ : Union[str, Any] = regex_match.groups()
A_ : Dict = F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
A_ : Dict = re_encoder_block_proj_out.sub(__UpperCamelCase , __UpperCamelCase)
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__UpperCamelCase):
A_ : Union[str, Any] = re_decoder_block_conv_out.match(__UpperCamelCase)
A_ : Any = regex_match.groups()
A_ : Optional[int] = int(groups[2]) * 2 + int(groups[3]) - 2
A_ : List[Any] = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
A_ : Dict = re_decoder_block_conv_out.sub(__UpperCamelCase , __UpperCamelCase)
elif re_decoder_block_resnet.fullmatch(__UpperCamelCase):
A_ : Optional[Any] = re_decoder_block_resnet.match(__UpperCamelCase)
A_ : List[str] = regex_match.groups()
A_ : int = int(groups[2]) * 2 + int(groups[3]) - 2
A_ : str = {"""1""": 1, """3""": 2}[groups[-2]]
A_ : List[Any] = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
A_ : Tuple = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A_ : Optional[int] = prefix + resnet_block
A_ : Optional[Any] = re_decoder_block_resnet.sub(__UpperCamelCase , __UpperCamelCase)
elif re_decoder_block_proj_in.fullmatch(__UpperCamelCase):
A_ : int = re_decoder_block_proj_in.match(__UpperCamelCase)
A_ : Tuple = regex_match.groups()
A_ : Tuple = F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
A_ : int = re_decoder_block_proj_in.sub(__UpperCamelCase , __UpperCamelCase)
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__UpperCamelCase):
A_ : List[Any] = re_prior_cond_conv_out.match(__UpperCamelCase)
A_ : List[Any] = regex_match.groups()
A_ : List[str] = int(groups[1]) * 2 + int(groups[2]) - 2
A_ : List[Any] = F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
A_ : Any = re_prior_cond_conv_out.sub(__UpperCamelCase , __UpperCamelCase)
elif re_prior_cond_resnet.fullmatch(__UpperCamelCase):
A_ : Optional[int] = re_prior_cond_resnet.match(__UpperCamelCase)
A_ : List[str] = regex_match.groups()
A_ : int = int(groups[1]) * 2 + int(groups[2]) - 2
A_ : str = {"""1""": 1, """3""": 2}[groups[-2]]
A_ : str = F'conditioner_blocks.upsampler.upsample_block.{block_index}.'
A_ : Union[str, Any] = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A_ : List[str] = prefix + resnet_block
A_ : Any = re_prior_cond_resnet.sub(__UpperCamelCase , __UpperCamelCase)
elif re_prior_cond_proj_in.fullmatch(__UpperCamelCase):
A_ : Dict = re_prior_cond_proj_in.match(__UpperCamelCase)
A_ : Tuple = regex_match.groups()
A_ : List[str] = F'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
A_ : Tuple = re_prior_cond_proj_in.sub(__UpperCamelCase , __UpperCamelCase)
# keep original key
else:
A_ : int = original_key
A_ : int = replace_key(__UpperCamelCase)
if F'{key_prefix}.{key}' not in model_state_dict or key is None:
print(F'failed converting {original_key} to {key}, does not match')
# handle missmatched shape
elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape:
A_ : List[str] = model_state_dict[F'{key_prefix}.{key}']
print(F'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match')
A_ : int = original_key
A_ : List[Any] = original_key
A_ : Dict = value
return new_dict
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : Union[str, Any]=None , lowerCamelCase : Dict=None):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/")[-1]}'):
A_ : Union[str, Any] = requests.get(F'{PREFIX}{file}' , allow_redirects=__UpperCamelCase)
os.makedirs(F'{pytorch_dump_folder_path}/' , exist_ok=__UpperCamelCase)
open(F'{pytorch_dump_folder_path}/{file.split("/")[-1]}' , """wb""").write(r.content)
A_ : List[Any] = MODEL_MAPPING[model_name.split("""/""")[-1]]
A_ : Dict = JukeboxConfig.from_pretrained(__UpperCamelCase)
A_ : int = JukeboxModel(__UpperCamelCase)
A_ : Dict = []
A_ : Dict = {}
for i, dict_name in enumerate(__UpperCamelCase):
A_ : Any = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/")[-1]}')["""model"""]
A_ : str = {}
for k in old_dic.keys():
if k.endswith(""".b"""):
A_ : Tuple = old_dic[k]
elif k.endswith(""".w"""):
A_ : Union[str, Any] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
A_ : List[Any] = old_dic[k]
else:
A_ : List[Any] = old_dic[k]
A_ : Tuple = """vqvae""" if i == 0 else F'priors.{3 - i}'
A_ : str = fix_jukebox_keys(__UpperCamelCase , model.state_dict() , __UpperCamelCase , __UpperCamelCase)
weight_dict.append(__UpperCamelCase)
A_ : Tuple = weight_dict.pop(0)
model.vqvae.load_state_dict(__UpperCamelCase)
for i in range(len(__UpperCamelCase)):
model.priors[i].load_state_dict(weight_dict[2 - i])
Path(__UpperCamelCase).mkdir(exist_ok=__UpperCamelCase)
with open(F'{pytorch_dump_folder_path}/mapping.json' , """w""") as txtfile:
json.dump(__UpperCamelCase , __UpperCamelCase)
print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
model.save_pretrained(__UpperCamelCase)
return weight_dict
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__magic_name__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
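
# Example invocation (editor addition; the script name is a placeholder and the checkpoints
# are several GB, so expect a long download):
#
#   python convert_jukebox.py \
#       --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted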
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__magic_name__ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__magic_name__ = 'main'
# Default branch name
__magic_name__ = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
__magic_name__ = 'aaaaaaa'
# This commit does not exist, so we should 404.
__magic_name__ = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
__magic_name__ = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def lowerCamelCase ( ):
print("""Welcome!""")
yield
print("""Bye!""")
@contextlib.contextmanager
def lowerCamelCase ( ):
print("""Bonjour!""")
yield
print("""Au revoir!""")
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Union[str, Any] ):
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch("""sys.stdout""" ,new_callable=io.StringIO )
def _a ( self : Any ,_a : Optional[int] ):
'''simple docstring'''
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,"""Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" ,new_callable=io.StringIO )
def _a ( self : List[Any] ,_a : Dict ):
'''simple docstring'''
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,"""Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" ,new_callable=io.StringIO )
def _a ( self : str ,_a : int ):
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,"""Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def _a ( self : int ):
'''simple docstring'''
self.assertEqual(find_labels(_a ) ,["""labels"""] )
self.assertEqual(find_labels(_a ) ,["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(_a ) ,["""start_positions""", """end_positions"""] )
class __lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_a ) ,["""labels"""] )
@require_tf
def _a ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(find_labels(_a ) ,["""labels"""] )
self.assertEqual(find_labels(_a ) ,["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(_a ) ,["""start_positions""", """end_positions"""] )
class __lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_a ) ,["""labels"""] )
@require_flax
def _a ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(find_labels(_a ) ,[] )
self.assertEqual(find_labels(_a ) ,[] )
self.assertEqual(find_labels(_a ) ,[] )
class __lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
pass
self.assertEqual(find_labels(_a ) ,[] )
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` (a sympy-parsable string) by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
'''simple docstring'''
pass
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : Dict ):
'''simple docstring'''
super().__init__()
A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a )
A_ : int = list(model.children() )[:-2]
A_ : int = nn.Sequential(*_a )
A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = [json.loads(_a ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
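# Editor's note (hedged): how the pieces above are wired together in the upstream MM-IMDB
# example, where the mangled definitions correspond to ImageEncoder, JsonlDataset,
# collate_fn, get_mmimdb_labels and get_image_transforms; paths and batch size are
# placeholders:
#
#   transforms = get_image_transforms()
#   labels = get_mmimdb_labels()
#   dataset = JsonlDataset("data/train.jsonl", tokenizer, transforms, labels, max_seq_length=512)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)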
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime up to and including ``num``."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
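
# Quick sanity check (editor addition):
if __name__ == "__main__":
    assert prime_sieve(10) == [2, 3, 5, 7]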
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """
    >>> mean([3, 6, 9])
    6.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
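
# Quick sanity check (editor addition):
if __name__ == "__main__":
    assert mean([1, 2, 3, 4]) == 2.5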
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.inta:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
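# Note (added for clarity): the engine file serialized above is reloaded further below via
# trt.Runtime(TRT_LOGGER).deserialize_cuda_engine(...) before the evaluation loop runs.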
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
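# Note (added for clarity): d_inputs must be ordered as (input_ids, attention_mask,
# token_type_ids) to match the three host-to-device copies above, and the two host output
# buffers come back as (start_logits, end_logits), which is how they are unpacked in the
# evaluation loop further below.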
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
A_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
        # Grab the sequence corresponding to that example (to know what the context is and what the question is).
A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase)
A_ : Tuple = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
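# Illustrative sketch (hypothetical numbers, not from the original script): with
# max_seq_length=384 and doc_stride=128, one example whose context tokenizes to roughly
# 900 tokens yields several overlapping features (3-4 here), and overflow_to_sample_mapping
# would repeat that example's index (e.g. [0, 0, 0, 0]) so each feature can be traced back
# to its source example during post-processing.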
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] ,_a : Any ,_a : Any=13 ,_a : Dict=32 ,_a : Union[str, Any]=2 ,_a : Optional[Any]=3 ,_a : Union[str, Any]=16 ,_a : Union[str, Any]=[32, 64, 128] ,_a : str=[1, 2, 1] ,_a : Optional[Any]=[2, 2, 4] ,_a : str=2 ,_a : str=2.0 ,_a : int=True ,_a : int=0.0 ,_a : Tuple=0.0 ,_a : str=0.1 ,_a : str="gelu" ,_a : Union[str, Any]=False ,_a : List[Any]=True ,_a : Optional[Any]=0.02 ,_a : Any=1e-5 ,_a : str=True ,_a : Dict=None ,_a : Any=True ,_a : List[Any]=10 ,_a : str=8 ,_a : str=["stage1", "stage2"] ,_a : str=[1, 2] ,):
'''simple docstring'''
A_ : str = parent
A_ : int = batch_size
A_ : int = image_size
A_ : Dict = patch_size
A_ : Any = num_channels
A_ : Optional[Any] = embed_dim
A_ : Dict = hidden_sizes
A_ : Dict = depths
A_ : Optional[Any] = num_heads
A_ : Optional[Any] = window_size
A_ : List[Any] = mlp_ratio
A_ : int = qkv_bias
A_ : Optional[Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : Tuple = drop_path_rate
A_ : int = hidden_act
A_ : Optional[Any] = use_absolute_embeddings
A_ : Union[str, Any] = patch_norm
A_ : int = layer_norm_eps
A_ : Optional[int] = initializer_range
A_ : Any = is_training
A_ : Optional[Any] = scope
A_ : str = use_labels
A_ : Dict = type_sequence_label_size
A_ : str = encoder_stride
A_ : Tuple = out_features
A_ : int = out_indices
def _a ( self : int ):
'''simple docstring'''
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Optional[int] = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def _a ( self : List[Any] ,_a : List[str] ,_a : Optional[Any] ,_a : str ):
'''simple docstring'''
A_ : Tuple = FocalNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ : List[Any] = model(lowerCamelCase__ )
A_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A_ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : Dict ,_a : int ,_a : List[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ : List[str] = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
A_ : Any = None
A_ : Union[str, Any] = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ : List[Any] = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _a ( self : List[str] ,_a : Any ,_a : Tuple ,_a : List[str] ):
'''simple docstring'''
A_ : Dict = FocalNetForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ : Union[str, Any] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Optional[int] = 1
A_ : Tuple = FocalNetForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Union[str, Any] ,_a : Optional[int] ,_a : Any ,_a : List[str] ):
'''simple docstring'''
A_ : int = self.type_sequence_label_size
A_ : Any = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ : Optional[int] = model(lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : int = 1
A_ : Dict = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _a ( self : Any ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
A_ : str = config_and_inputs
A_ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
a_ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = FocalNetModelTester(self )
A_ : Tuple = ConfigTester(self ,config_class=lowerCamelCase__ ,embed_dim=37 ,has_text_modality=lowerCamelCase__ )
def _a ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : List[str] ):
'''simple docstring'''
return
def _a ( self : int ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _a ( self : Dict ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def _a ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def _a ( self : str ):
'''simple docstring'''
pass
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
A_ : Union[str, Any] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
A_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
A_ : Dict = model_class(lowerCamelCase__ )
A_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def _a ( self : List[str] ,_a : Any ,_a : List[str] ,_a : int ,_a : List[Any] ):
'''simple docstring'''
A_ : int = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
A_ : List[Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
A_ : Union[str, Any] = outputs.hidden_states
A_ : Union[str, Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
# FocalNet has a different seq_length
A_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
A_ : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
A_ : Optional[Any] = reshaped_hidden_states[0].shape
A_ : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase__ ,lowerCamelCase__ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def _a ( self : Any ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
A_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Any = True
self.check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A_ : int = 3
A_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A_ : Any = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A_ : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
A_ : int = True
self.check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
self.check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,(padded_height, padded_width) )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = FocalNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
A_ : List[str] = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self : Optional[int] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase__ )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A_ : List[Any] = image_processor(images=lowerCamelCase__ ,return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
A_ : Dict = model(**lowerCamelCase__ )
# verify the logits
A_ : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
A_ : Optional[Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class __lowerCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a_ = (FocalNetBackbone,) if is_torch_available() else ()
a_ = FocalNetConfig
a_ = False
def _a ( self : str ):
'''simple docstring'''
A_ : List[str] = FocalNetModelTester(self )
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 27 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __a ):
'''simple docstring'''
a_ = "deberta-v2"
def __init__( self : str ,_a : str=128100 ,_a : Optional[int]=1536 ,_a : Any=24 ,_a : Any=24 ,_a : str=6144 ,_a : int="gelu" ,_a : str=0.1 ,_a : List[Any]=0.1 ,_a : List[str]=512 ,_a : Optional[Any]=0 ,_a : Union[str, Any]=0.02 ,_a : Optional[Any]=1e-7 ,_a : Optional[int]=False ,_a : Any=-1 ,_a : Any=0 ,_a : Optional[int]=True ,_a : int=None ,_a : Optional[Any]=0 ,_a : Any="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
A_ : int = hidden_size
A_ : List[str] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : int = hidden_act
A_ : Tuple = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Dict = type_vocab_size
A_ : List[str] = initializer_range
A_ : List[Any] = relative_attention
A_ : int = max_relative_positions
A_ : List[str] = pad_token_id
A_ : List[str] = position_biased_input
# Backwards compatibility
if type(lowerCAmelCase_ ) == str:
A_ : Tuple = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Tuple = pos_att_type
A_ : Tuple = vocab_size
A_ : Optional[Any] = layer_norm_eps
A_ : Tuple = kwargs.get("""pooler_hidden_size""" ,lowerCAmelCase_ )
A_ : str = pooler_dropout
A_ : int = pooler_hidden_act
class __lowerCAmelCase ( __a ):
'''simple docstring'''
@property
def _a ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Dict = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return 12
def _a ( self : List[Any] ,_a : Tuple ,_a : Optional[int] = -1 ,_a : Optional[int] = -1 ,_a : Optional[int] = -1 ,_a : Dict = False ,_a : Tuple = None ,_a : Optional[int] = 3 ,_a : Optional[int] = 40 ,_a : Tuple = 40 ,_a : List[Any] = None ,):
'''simple docstring'''
A_ : Optional[int] = super().generate_dummy_inputs(preprocessor=lowerCAmelCase_ ,framework=lowerCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
| 27 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16 , lowerCamelCase : str = "bert-base-cased"):
A_ : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase__)
A_ : Dict = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : int):
# max_length=None => use the model max length (it's actually the default)
A_ : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A_ : List[Any] = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=UpperCamelCase__)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Dict):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""")
return tokenizer.pad(UpperCamelCase__ , padding="""longest""" , return_tensors="""pt""")
# Instantiate dataloaders.
A_ : List[str] = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__)
A_ : List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__)
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int):
model.eval()
A_ : Union[str, Any] = 0
for step, batch in enumerate(UpperCamelCase__):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : int = model(**UpperCamelCase__)
A_ : List[Any] = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
A_ : List[str] = accelerator.gather(
(predictions, batch["""labels"""])) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCamelCase__) - 1:
A_ : Tuple = predictions[: len(eval_dataloader.dataset) - samples_seen]
A_ : str = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
A_ : Tuple = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any]):
# Initialize accelerator
A_ : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : str = config["""lr"""]
A_ : Any = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Optional[Any] = int(config["""batch_size"""])
A_ : Tuple = args.model_name_or_path
set_seed(UpperCamelCase__)
A_ : List[Any] = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__)
# Instantiate optimizer
A_ : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A_ : Dict = optimizer_cls(params=model.parameters() , lr=UpperCamelCase__)
if accelerator.state.deepspeed_plugin is not None:
A_ : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
A_ : Any = 1
A_ : List[str] = (len(UpperCamelCase__) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A_ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=0 , num_training_steps=UpperCamelCase__ , )
else:
A_ : str = DummyScheduler(UpperCamelCase__ , total_num_steps=UpperCamelCase__ , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ : Tuple = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
# We need to keep track of how many total steps we have iterated over
A_ : Optional[int] = 0
    # We also need to keep track of the starting epoch so files are named properly
A_ : int = 0
A_ : int = evaluate.load("""glue""" , """mrpc""")
A_ : int = num_epochs
if args.partial_train_epoch is not None:
A_ : Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint)
A_ : str = args.resume_from_checkpoint.split("""epoch_""")[1]
A_ : Optional[int] = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A_ : List[str] = int(UpperCamelCase__) + 1
A_ : Any = evaluation_loop(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
accelerator.print("""resumed checkpoint performance:""" , UpperCamelCase__)
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0])
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""])
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json') , """r""") as f:
A_ : Dict = json.load(UpperCamelCase__)
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A_ : int = {}
for epoch in range(UpperCamelCase__ , UpperCamelCase__):
model.train()
for step, batch in enumerate(UpperCamelCase__):
A_ : Tuple = model(**UpperCamelCase__)
A_ : List[str] = outputs.loss
A_ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A_ : List[Any] = F'epoch_{epoch}'
A_ : Dict = os.path.join(args.output_dir , UpperCamelCase__)
accelerator.save_state(UpperCamelCase__)
A_ : List[Any] = evaluation_loop(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
A_ : Any = accuracy
A_ : Optional[Any] = lr_scheduler.get_lr()[0]
A_ : Union[str, Any] = optimizer.param_groups[0]["""lr"""]
A_ : Tuple = epoch
A_ : Optional[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , UpperCamelCase__)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json') , """w""") as f:
json.dump(UpperCamelCase__ , UpperCamelCase__)
def lowerCamelCase ( ):
    A_ : str = argparse.ArgumentParser(description="""Simple example of a training script that saves per-epoch checkpoints and can resume from them.""")
parser.add_argument(
"""--model_name_or_path""" , type=UpperCamelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCamelCase__ , )
parser.add_argument(
"""--output_dir""" , type=UpperCamelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=UpperCamelCase__ , default=2 , help="""Number of train epochs.""" , )
A_ : Optional[int] = parser.parse_args()
A_ : str = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(UpperCamelCase__ , UpperCamelCase__)
if __name__ == "__main__":
main()
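# Illustrative invocations (added sketch; the script filename and paths are placeholders):
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --output_dir ./checkpoints --num_epochs 2
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --output_dir ./checkpoints --resume_from_checkpoint ./checkpoints/epoch_0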
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
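    # Shape note (added for clarity): with the default 64 mel bins, each of the four stacked
    # views has shape (chunk_frames, 64), so the fused array returned above has shape
    # (4, chunk_frames, 64): the shrunk global view plus the front, middle and back crops.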
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
        # Only use repeat as a new possible value for padding. You repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
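# --- Illustrative sketch (not part of the feature extractor above) -----------
# It shows the arithmetic behind the "repeatpad" branch: a waveform shorter
# than max_length is tiled with as many full copies as fit and then
# zero-padded to the target length.  The sample values and max_length below
# are assumptions made up for this example only.
import numpy as np

example_waveform = np.arange(5, dtype=np.float32)   # pretend 5-sample clip
example_max_length = 12
n_repeat = int(example_max_length / len(example_waveform))   # 2 full copies fit
tiled = np.tile(example_waveform, n_repeat)                  # "repeat" step -> length 10
repeatpadded = np.pad(
    tiled, (0, example_max_length - tiled.shape[0]), mode="constant", constant_values=0
)                                                            # "pad" step -> length 12
assert repeatpadded.shape[0] == example_max_length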
| 27 | 0 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__magic_name__ = Lock()
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any]):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_UpperCamelCase)
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A_ : str = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A_ : List[Any] = min(_UpperCamelCase , _UpperCamelCase)
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_UpperCamelCase)
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A_ : Dict = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A_ : str = max(_UpperCamelCase , _UpperCamelCase)
# after all swaps are performed, send the values back to main
result_pipe[1].send(_UpperCamelCase)
def lowerCamelCase ( lowerCamelCase : int):
A_ : Dict = []
A_ : int = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe())
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A_ : Optional[Any] = Pipe()
A_ : int = Pipe()
process_array_.append(
Process(
target=_UpperCamelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ))
A_ : Optional[int] = temp_rs
A_ : List[Any] = temp_rr
for i in range(1 , len(_UpperCamelCase) - 1):
A_ : List[Any] = Pipe()
A_ : str = Pipe()
process_array_.append(
Process(
target=_UpperCamelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ))
A_ : Any = temp_rs
A_ : str = temp_rr
process_array_.append(
Process(
target=_UpperCamelCase , args=(
len(_UpperCamelCase) - 1,
arr[len(_UpperCamelCase) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_UpperCamelCase) - 1],
) , ))
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_UpperCamelCase)):
A_ : str = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowerCamelCase ( ):
A_ : Tuple = list(range(10 , 0 , -1))
print("""Initial List""")
print(*_UpperCamelCase)
A_ : Union[str, Any] = odd_even_transposition(_UpperCamelCase)
print("""Sorted List\n""")
print(*_UpperCamelCase)
if __name__ == "__main__":
main()
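# --- Illustrative single-process sketch ---------------------------------------
# Same odd-even transposition idea as the multiprocessing version above, but
# done in-place in one process so the compare-exchange pattern is easy to see.
# Function and variable names here are examples only.
def odd_even_transposition_single_process(arr):
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_single_process(list(range(10, 0, -1))) == list(range(1, 11))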
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 0 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any]=1024):
A_ , A_ : List[str] = [], []
A_ : Any = list(zip(lowerCamelCase , lowerCamelCase))
A_ , A_ : Optional[Any] = sorted_examples[0]
def is_too_big(lowerCamelCase : Tuple):
return tok(lowerCamelCase , return_tensors="""pt""").input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:]):
A_ : int = new_src + """ """ + src
A_ : List[Any] = new_tgt + """ """ + tgt
        if is_too_big(lowerCamelCase) or is_too_big(lowerCamelCase): # can't fit, finalize example
finished_src.append(lowerCamelCase)
finished_tgt.append(lowerCamelCase)
A_ , A_ : List[str] = src, tgt
else: # can fit, keep adding
A_ , A_ : Optional[int] = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(lowerCamelCase)
finished_tgt.append(lowerCamelCase)
return finished_src, finished_tgt
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Path , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int]):
A_ : List[str] = Path(lowerCamelCase)
save_path.mkdir(exist_ok=lowerCamelCase)
for split in ["train"]:
A_ , A_ : Optional[int] = data_dir / F'{split}.source', data_dir / F'{split}.target'
A_ : int = [x.rstrip() for x in Path(lowerCamelCase).open().readlines()]
A_ : int = [x.rstrip() for x in Path(lowerCamelCase).open().readlines()]
A_ , A_ : Dict = pack_examples(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
print(F'packed {split} split from {len(lowerCamelCase)} examples -> {len(lowerCamelCase)}.')
Path(save_path / F'{split}.source').open("""w""").write("""\n""".join(lowerCamelCase))
Path(save_path / F'{split}.target').open("""w""").write("""\n""".join(lowerCamelCase))
for split in ["val", "test"]:
A_ , A_ : str = data_dir / F'{split}.source', data_dir / F'{split}.target'
shutil.copyfile(lowerCamelCase , save_path / F'{split}.source')
shutil.copyfile(lowerCamelCase , save_path / F'{split}.target')
def lowerCamelCase ( ):
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=lowerCamelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""")
parser.add_argument("""--max_seq_len""" , type=lowerCamelCase , default=128)
parser.add_argument("""--data_dir""" , type=lowerCamelCase)
parser.add_argument("""--save_path""" , type=lowerCamelCase)
A_ : Optional[int] = parser.parse_args()
A_ : Optional[int] = AutoTokenizer.from_pretrained(args.tok_name)
return pack_data_dir(lowerCamelCase , Path(args.data_dir) , args.max_seq_len , args.save_path)
if __name__ == "__main__":
packer_cli()
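# --- Toy illustration of the greedy packing above ------------------------------
# Uses whitespace token counts instead of a real tokenizer; the names and the
# max_tokens value are examples only, not part of the script.
def toy_pack_examples(srcs, tgts, max_tokens=8):
    finished_src, finished_tgt = [], []
    cur_src, cur_tgt = srcs[0], tgts[0]
    for src, tgt in zip(srcs[1:], tgts[1:]):
        cand_src, cand_tgt = cur_src + " " + src, cur_tgt + " " + tgt
        if len(cand_src.split()) > max_tokens or len(cand_tgt.split()) > max_tokens:
            # cannot fit: finalize the current packed example and start a new one
            finished_src.append(cur_src)
            finished_tgt.append(cur_tgt)
            cur_src, cur_tgt = src, tgt
        else:
            cur_src, cur_tgt = cand_src, cand_tgt
    finished_src.append(cur_src)
    finished_tgt.append(cur_tgt)
    return finished_src, finished_tgt

assert toy_pack_examples(["a b", "c d", "e f g h i"], ["x", "y", "z"]) == (
    ["a b c d", "e f g h i"],
    ["x y", "z"],
)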
| 721 |
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
    return baseaa.aaaencode(lowerCamelCase.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 0 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int):
A_ : Union[str, Any] = os.path.abspath(_SCREAMING_SNAKE_CASE)
logger.info(F'Converting TensorFlow checkpoint from {tf_path}')
# Load weights from TF model
A_ : str = tf.train.list_variables(_SCREAMING_SNAKE_CASE)
A_ : Union[str, Any] = []
A_ : Dict = []
A_ : Optional[Any] = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
A_ : List[str] = full_name.split("""/""")
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'Skipping non-model layer {full_name}')
continue
if "optimizer" in full_name:
logger.info(F'Skipping optimization layer {full_name}')
continue
if name[0] == "model":
# ignore initial 'model'
A_ : Dict = name[1:]
# figure out how many levels deep the name is
A_ : int = 0
for _name in name:
if _name.startswith("""layer_with_weights"""):
depth += 1
else:
break
layer_depth.append(_SCREAMING_SNAKE_CASE)
# read data
A_ : str = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
names.append("""/""".join(_SCREAMING_SNAKE_CASE))
arrays.append(_SCREAMING_SNAKE_CASE)
logger.info(F'Read a total of {len(_SCREAMING_SNAKE_CASE):,} layers')
# Sanity check
if len(set(_SCREAMING_SNAKE_CASE)) != 1:
raise ValueError(F'Found layer names with different depths (layer depth {list(set(_SCREAMING_SNAKE_CASE))})')
A_ : List[Any] = list(set(_SCREAMING_SNAKE_CASE))[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""")
# convert layers
logger.info("""Converting weights...""")
for full_name, array in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
A_ : Dict = full_name.split("""/""")
A_ : int = model
A_ : List[str] = []
for i, m_name in enumerate(_SCREAMING_SNAKE_CASE):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights"""):
A_ : Dict = int(m_name.split("""-""")[-1])
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""])
A_ : Dict = getattr(_SCREAMING_SNAKE_CASE , """embeddings""")
A_ : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""")
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4)])
A_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , """encoder""")
A_ : Dict = getattr(_SCREAMING_SNAKE_CASE , """layer""")
A_ : Optional[int] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""])
A_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , """pooler""")
A_ : str = getattr(_SCREAMING_SNAKE_CASE , """dense""")
elif m_name == "embeddings":
trace.append("""embeddings""")
A_ : int = getattr(_SCREAMING_SNAKE_CASE , """embeddings""")
if layer_num == 0:
trace.append("""word_embeddings""")
A_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , """word_embeddings""")
elif layer_num == 1:
trace.append("""position_embeddings""")
A_ : Tuple = getattr(_SCREAMING_SNAKE_CASE , """position_embeddings""")
elif layer_num == 2:
trace.append("""token_type_embeddings""")
A_ : Dict = getattr(_SCREAMING_SNAKE_CASE , """token_type_embeddings""")
else:
raise ValueError(F'Unknown embedding layer with name {full_name}')
trace.append("""weight""")
A_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , """weight""")
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""])
A_ : Any = getattr(_SCREAMING_SNAKE_CASE , """attention""")
A_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , """self""")
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""])
A_ : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , """attention""")
A_ : Union[str, Any] = getattr(_SCREAMING_SNAKE_CASE , """output""")
A_ : Any = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""")
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""])
A_ : str = getattr(_SCREAMING_SNAKE_CASE , """attention""")
A_ : List[Any] = getattr(_SCREAMING_SNAKE_CASE , """output""")
A_ : int = getattr(_SCREAMING_SNAKE_CASE , """dense""")
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""])
A_ : List[Any] = getattr(_SCREAMING_SNAKE_CASE , """output""")
A_ : str = getattr(_SCREAMING_SNAKE_CASE , """dense""")
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""])
A_ : int = getattr(_SCREAMING_SNAKE_CASE , """output""")
A_ : str = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""")
elif m_name == "_key_dense":
# attention key
trace.append("""key""")
A_ : Tuple = getattr(_SCREAMING_SNAKE_CASE , """key""")
elif m_name == "_query_dense":
# attention query
trace.append("""query""")
A_ : Dict = getattr(_SCREAMING_SNAKE_CASE , """query""")
elif m_name == "_value_dense":
# attention value
trace.append("""value""")
A_ : int = getattr(_SCREAMING_SNAKE_CASE , """value""")
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""])
A_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , """intermediate""")
A_ : List[str] = getattr(_SCREAMING_SNAKE_CASE , """dense""")
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""")
A_ : int = getattr(_SCREAMING_SNAKE_CASE , """output""")
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""")
A_ : Dict = getattr(_SCREAMING_SNAKE_CASE , """bias""")
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""")
A_ : List[str] = getattr(_SCREAMING_SNAKE_CASE , """weight""")
else:
logger.warning(F'Ignored {m_name}')
# for certain layers reshape is necessary
A_ : int = """.""".join(_SCREAMING_SNAKE_CASE)
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , _SCREAMING_SNAKE_CASE) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , _SCREAMING_SNAKE_CASE):
A_ : Optional[Any] = array.reshape(pointer.data.shape)
if "kernel" in full_name:
A_ : Optional[Any] = array.transpose()
if pointer.shape == array.shape:
A_ : Dict = torch.from_numpy(_SCREAMING_SNAKE_CASE)
else:
raise ValueError(
F'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
F' {array.shape}')
logger.info(F'Successfully set variable {full_name} to PyTorch layer {trace}')
return model
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : Dict):
# Instantiate model
logger.info(F'Loading model based on config from {config_path}...')
A_ : str = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE)
A_ : Optional[int] = BertModel(_SCREAMING_SNAKE_CASE)
# Load weights from checkpoint
logger.info(F'Loading weights from checkpoint {tf_checkpoint_path}...')
load_tfa_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
# Save pytorch-model
logger.info(F'Saving PyTorch model to {pytorch_dump_path}...')
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
__magic_name__ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
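# --- Illustrative getattr walk --------------------------------------------------
# A reduced sketch of the pointer traversal used above: a "trace" of attribute
# names is resolved step by step into the matching PyTorch parameter before the
# TF value is copied in.  TinyModel and the trace are made-up examples only.
def _trace_resolution_example():
    from torch import nn

    class TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.encoder = nn.Linear(4, 4)

    def resolve(module, trace):
        pointer = module
        for attr_name in trace:
            pointer = getattr(pointer, attr_name)
        return pointer

    weight = resolve(TinyModel(), ["encoder", "weight"])
    return weight.shape  # torch.Size([4, 4])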
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
# word like '180' or '身高' or '神'
for char in word:
A_ : Optional[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase)
if chinese_word:
word_set.add(lowerCamelCase)
A_ : Any = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
if not chinese_word_set:
return bert_tokens
A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
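# --- Toy example of the "##" sub-symbol marking ---------------------------------
# Self-contained illustration of what the reference file encodes: characters
# that continue a whole segmented word get a "##" prefix, and their positions
# are what downstream whole-word masking masks together.  The word set and
# token list below are made-up examples.
def _mark_subwords_example():
    chinese_word_set = {"身高"}
    bert_tokens = ["身", "高", "1", "8", "0"]
    marked = list(bert_tokens)
    i = 0
    while i < len(marked):
        for length in range(min(4, len(marked) - i), 1, -1):
            if "".join(bert_tokens[i : i + length]) in chinese_word_set:
                for j in range(i + 1, i + length):
                    marked[j] = "##" + marked[j]
                i += length - 1
                break
        i += 1
    return marked  # ["身", "##高", "1", "8", "0"]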
| 27 | 0 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
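# --- Hedged usage sketch for a processor like the one above ---------------------
# Not executed here; the checkpoint name and image path are illustrative
# placeholders.  A single processor call tokenizes the text and preprocesses
# the image, returning input_ids/attention_mask plus pixel_values/pixel_mask.
def _vilt_processor_usage_example():
    from PIL import Image
    from transformers import ViltProcessor

    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    return processor(Image.open("cats.jpg"), "How many cats are there?", return_tensors="pt")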
| 27 | 0 |
'''simple docstring'''
import sys
from pathlib import Path
__magic_name__ = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__magic_name__ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
__magic_name__ = 'zero2'
__magic_name__ = 'zero3'
__magic_name__ = [ZEROa, ZEROa]
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
    A_ : Any = parameterized.to_safe_name("""_""".join(str(x) for x in param.args))
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
__magic_name__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __lowerCAmelCase ( a__ ):
'''simple docstring'''
@parameterized.expand(lowercase__ ,name_func=lowercase__ )
def _a ( self : str ,_a : Optional[Any] ,_a : str ):
'''simple docstring'''
self.run_and_check(
stage=lowercase__ ,model=lowercase__ ,distributed=lowercase__ ,fpaa=lowercase__ ,)
@require_torch_multi_gpu
@parameterized.expand(lowercase__ ,name_func=lowercase__ )
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ):
'''simple docstring'''
self.run_and_check(
stage=lowercase__ ,model=lowercase__ ,distributed=lowercase__ ,fpaa=lowercase__ ,)
@parameterized.expand(lowercase__ ,name_func=lowercase__ )
def _a ( self : Union[str, Any] ,_a : str ,_a : str ):
'''simple docstring'''
self.run_and_check(
stage=lowercase__ ,model=lowercase__ ,distributed=lowercase__ ,fpaa=lowercase__ ,)
@require_torch_multi_gpu
@parameterized.expand(lowercase__ ,name_func=lowercase__ )
def _a ( self : Optional[Any] ,_a : Tuple ,_a : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase__ ,model=lowercase__ ,distributed=lowercase__ ,fpaa=lowercase__ ,)
def _a ( self : Dict ,_a : str ):
'''simple docstring'''
pass
def _a ( self : str ,_a : Union[str, Any] ,_a : str ,_a : Optional[int] = 10 ,_a : Union[str, Any] = True ,_a : Tuple = True ,_a : List[str] = True ,):
'''simple docstring'''
A_ : Dict = models[model]
A_ : int = self.run_trainer(
stage=lowercase__ ,model_name=lowercase__ ,eval_steps=lowercase__ ,num_train_epochs=1 ,distributed=lowercase__ ,fpaa=lowercase__ ,)
self.do_checks(lowercase__ )
return output_dir
def _a ( self : int ,_a : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] = 10 ,_a : Dict = 1 ,_a : List[Any] = True ,_a : Union[str, Any] = True ,):
'''simple docstring'''
A_ : Any = self.get_auto_remove_tmp_dir("""./xxx""" ,after=lowercase__ )
A_ : str = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowercase__ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A_ : Dict = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
A_ : Any = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
A_ : Tuple = self.get_launcher(lowercase__ )
A_ : List[Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase__ ,env=self.get_env() )
return output_dir
def _a ( self : Any ,_a : Dict=False ):
'''simple docstring'''
A_ : Tuple = min(2 ,get_gpu_count() ) if distributed else 1
return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
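# --- Illustrative ZeRO stage-2 config --------------------------------------------
# A minimal sketch of the kind of content the ds_config_wav2vec2_zero2.json
# referenced above would hold; the values here are placeholders, not the
# repository's real config.  Note the zero_optimization.find_unused_parameters
# flag mentioned in the comment above.
TOY_DS_CONFIG_ZERO2 = {
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": "auto",
    "fp16": {"enabled": "auto"},
    "zero_optimization": {
        "stage": 2,
        "find_unused_parameters": True,
    },
}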
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
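# --- Minimal sketch of the backend check -----------------------------------------
# Not the actual helper behind requires_backends: it only illustrates what the
# dummy class above accomplishes, i.e. failing with a readable ImportError as
# soon as the class is used without torch/torchsde installed.
import importlib.util

def _toy_requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backends: {', '.join(missing)}")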
| 27 | 0 |
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : str):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : int=0):
    return sorted(SCREAMING_SNAKE_CASE__ , key=lambda x: x[column])
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any=float("""inf""")):
for i in range(points_counts - 1):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__):
A_ : str = euclidean_distance_sqr(points[i] , points[j])
if current_dis < min_dis:
A_ : Tuple = current_dis
return min_dis
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any]=float("""inf""")):
for i in range(min(6 , points_counts - 1) , SCREAMING_SNAKE_CASE__):
for j in range(max(0 , i - 6) , SCREAMING_SNAKE_CASE__):
A_ : List[Any] = euclidean_distance_sqr(points[i] , points[j])
if current_dis < min_dis:
A_ : Union[str, Any] = current_dis
return min_dis
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[Any]):
if points_counts <= 3:
return dis_between_closest_pair(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# recursion
A_ : Optional[Any] = points_counts // 2
A_ : Union[str, Any] = closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE__ , points_sorted_on_y[:mid] , SCREAMING_SNAKE_CASE__)
A_ : Optional[int] = closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE__ , points_sorted_on_y[mid:] , points_counts - mid)
A_ : Optional[int] = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
A_ : Dict = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
cross_strip.append(SCREAMING_SNAKE_CASE__)
A_ : int = dis_between_closest_in_strip(
SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__)
return min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Tuple = column_based_sort(SCREAMING_SNAKE_CASE__ , column=0)
A_ : str = column_based_sort(SCREAMING_SNAKE_CASE__ , column=1)
return (
closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
) ** 0.5
if __name__ == "__main__":
__magic_name__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
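    # --- Brute-force cross-check (illustrative) ----------------------------------
    # O(n^2) comparison over all pairs of the same sample points; handy for
    # sanity checking the divide-and-conquer result printed above.
    from itertools import combinations
    from math import dist

    sample_points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Brute force distance:', min(dist(p, q) for p, q in combinations(sample_points, 2)))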
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
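# --- Worked example of the token-overlap F1 above ---------------------------------
# Self-contained arithmetic only; the strings are made up.  With prediction
# "the cat sat" and reference "a cat sat down", normalization drops the
# articles, leaving ["cat", "sat"] vs ["cat", "sat", "down"].
def _f1_worked_example():
    from collections import Counter

    pred_toks, gold_toks = ["cat", "sat"], ["cat", "sat", "down"]
    num_same = sum((Counter(pred_toks) & Counter(gold_toks)).values())  # 2
    precision = num_same / len(pred_toks)   # 1.0
    recall = num_same / len(gold_toks)      # 2/3
    return 2 * precision * recall / (precision + recall)  # 0.8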
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
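# --- Minimal lazy-import sketch ----------------------------------------------------
# Not the real transformers._LazyModule: just the core idea behind the import
# structure above, where attribute access triggers the actual submodule import.
def _toy_lazy_module(package_name, import_structure):
    import importlib
    import types

    class _ToyLazyModule(types.ModuleType):
        def __getattr__(self, item):
            for submodule, names in import_structure.items():
                if item in names:
                    real = importlib.import_module(f"{package_name}.{submodule}")
                    return getattr(real, item)
            raise AttributeError(item)

    return _ToyLazyModule(package_name)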
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def lowerCamelCase ( ):
from torch.utils.cpp_extension import load
A_ : List[str] = Path(lowerCamelCase).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
A_ : Tuple = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp"""),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu"""),
]
]
load(
"""MultiScaleDeformableAttention""" , lowerCamelCase , with_cuda=lowerCamelCase , extra_include_paths=[str(lowerCamelCase)] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
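# --- Tiny load_inline illustration --------------------------------------------------
# Separate from the kernel loader above; it shows the same
# torch.utils.cpp_extension machinery with an inline C++ source instead of
# on-disk .cpp/.cu files.  It still needs a working C++ toolchain, so treat it
# as a sketch only; the extension name and function are made up.
def load_toy_extension():
    from torch.utils.cpp_extension import load_inline

    cpp_source = "torch::Tensor add_one(torch::Tensor x) { return x + 1; }"
    return load_inline(name="toy_add_one", cpp_sources=cpp_source, functions=["add_one"])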
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
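# Hedged usage sketch (added): outside the test harness, the same two-stage flow reads roughly
#     prior = KandinskyVaaPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#     decoder = KandinskyVaaControlnetPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-controlnet-depth")
#     image_embeds, negative_embeds = prior("A robot, 4k photo").to_tuple()
#     image = decoder(image_embeds=image_embeds, negative_image_embeds=negative_embeds, hint=hint).images[0]
# where `hint` is a (1, 3, H, W) depth-map tensor scaled to [0, 1], exactly as prepared in the
# slow test above; the checkpoint ids are taken from that test.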
| 27 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
# Copy a trax/numpy weight matrix (and optional bias) into the given torch layer, asserting that shapes match.
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any]=None):
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
A_ : Optional[Any] = nn.Parameter(lowercase_)
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
A_ : Optional[Any] = nn.Parameter(lowercase_)
# LSH self-attention layers ship three weight blocks: the shared query_key projection, value, and output dense.
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Tuple):
A_ : List[Any] = np.asarray(weights[0])
A_ : Any = np.asarray(weights[1])
A_ : str = np.asarray(weights[2])
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowercase_).transpose(1 , 2).contiguous().view(-1 , lowercase_) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowercase_).transpose(1 , 2).contiguous().view(-1 , lowercase_) , )
set_param(
torch_layer.output.dense , torch.tensor(lowercase_).view(-1 , lowercase_).contiguous().transpose(0 , 1) , )
# Local self-attention layers ship four weight blocks: separate query, key, value projections and output dense.
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Any):
A_ : int = np.asarray(weights[0])
A_ : Optional[int] = np.asarray(weights[1])
A_ : str = np.asarray(weights[2])
A_ : List[str] = np.asarray(weights[3])
set_param(
torch_layer.self_attention.query , torch.tensor(lowercase_).transpose(1 , 2).contiguous().view(-1 , lowercase_) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowercase_).transpose(1 , 2).contiguous().view(-1 , lowercase_) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowercase_).transpose(1 , 2).contiguous().view(-1 , lowercase_) , )
set_param(
torch_layer.output.dense , torch.tensor(lowercase_).view(-1 , lowercase_).contiguous().transpose(0 , 1) , )
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : int):
A_ : Union[str, Any] = weights[0][0][0]
A_ : Any = np.asarray(layer_norm_a[0])
A_ : int = np.asarray(layer_norm_a[1])
set_param(
torch_block.attention.layer_norm , torch.tensor(lowercase_) , torch.tensor(lowercase_) , )
# lsh weights + output
A_ : List[Any] = weights[0][1]
if len(lowercase_) < 4:
set_layer_weights_in_torch_lsh(lowercase_ , torch_block.attention , lowercase_)
else:
set_layer_weights_in_torch_local(lowercase_ , torch_block.attention , lowercase_)
    # intermediate weights
A_ : Dict = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowercase_) == 4:
A_ : Tuple = intermediate_weights[2]
# layernorm 2
A_ : int = np.asarray(intermediate_weights[0][0])
A_ : Optional[int] = np.asarray(intermediate_weights[0][1])
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowercase_) , torch.tensor(lowercase_) , )
# intermediate dense
A_ : List[Any] = np.asarray(intermediate_weights[1][0])
A_ : Tuple = np.asarray(intermediate_weights[1][1])
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowercase_).transpose(0 , 1).contiguous() , torch.tensor(lowercase_) , )
# intermediate out
A_ : Union[str, Any] = np.asarray(intermediate_weights[4][0])
A_ : List[Any] = np.asarray(intermediate_weights[4][1])
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowercase_).transpose(0 , 1).contiguous() , torch.tensor(lowercase_) , )
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Optional[Any]):
A_ : Optional[Any] = torch_model.reformer
# word embeds
A_ : List[str] = np.asarray(weights[1])
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowercase_) , )
if isinstance(weights[3] , lowercase_):
A_ : Tuple = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights)):
A_ : List[str] = np.asarray(weights[3][emb_idx][0])
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
A_ : Dict = nn.Parameter(torch.tensor(lowercase_))
A_ : Optional[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers) * 4 == len(
lowercase_), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
A_ : List[Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowercase_ , lowercase_ , lowercase_)
# output layer norm
A_ : int = np.asarray(weights[7][0])
A_ : List[Any] = np.asarray(weights[7][1])
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowercase_) , torch.tensor(lowercase_) , )
# output embeddings
A_ : str = np.asarray(weights[9][0])
A_ : Any = np.asarray(weights[9][1])
set_param(
torch_model.lm_head.decoder , torch.tensor(lowercase_).transpose(0 , 1).contiguous() , torch.tensor(lowercase_) , )
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any):
A_ : Dict = ReformerConfig.from_json_file(lowercase_)
print(F'Building PyTorch model from configuration: {config}')
A_ : Dict = ReformerModelWithLMHead(lowercase_)
with open(lowercase_ , """rb""") as f:
A_ : Optional[Any] = pickle.load(lowercase_)["weights"]
set_model_weights_in_torch(lowercase_ , lowercase_ , config.hidden_size)
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}')
torch.save(model.state_dict() , lowercase_)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
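# Hedged example invocation (added; the script name and paths are placeholders, the flags are the
# ones declared by the argument parser above):
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path ./reformer_weights.pkl \
#         --config_file ./reformer_config.json \
#         --pytorch_dump_path ./reformer_pytorch_dump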
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
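# Hedged usage sketch (added): the public transformers counterparts of the classes above are
# assumed to be DebertaV2Config / DebertaV2OnnxConfig (model_type "deberta-v2"); the defaults
# mirror the __init__ signature documented above.
if __name__ == "__main__":
    from transformers import DebertaV2Config

    cfg = DebertaV2Config(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
    print(cfg.model_type, cfg.hidden_size)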
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : int):
if not nums:
return 0
A_ : Any = nums[0]
A_ : Optional[Any] = 0
for num in nums[1:]:
A_ , A_ : Dict = (
max_excluding + num,
max(__UpperCamelCase , __UpperCamelCase),
)
return max(__UpperCamelCase , __UpperCamelCase)
if __name__ == "__main__":
import doctest
doctest.testmod()
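    # Hedged reference sketch (added): a self-contained version of the same non-adjacent
    # maximum-sum recurrence with readable names, useful for checking the logic above by hand.
    def _max_non_adjacent_sum(nums: list[int]) -> int:
        if not nums:
            return 0
        max_including, max_excluding = nums[0], 0
        for num in nums[1:]:
            max_including, max_excluding = max_excluding + num, max(max_including, max_excluding)
        return max(max_including, max_excluding)

    assert _max_non_adjacent_sum([1, 2, 3]) == 4          # take 1 and 3
    assert _max_non_adjacent_sum([2, 7, 9, 3, 1]) == 12   # take 2, 9 and 1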
| 707 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__magic_name__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__magic_name__ = BeautifulSoup(res.text, 'html.parser')
__magic_name__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
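# Hedged note (added): the script above issues a single Google search for the command-line
# arguments with a random User-Agent, dumps the raw HTML to project1a.html for inspection, and
# opens the first five result links (selected via the "eZt8xd" class) in the default browser;
# Google's markup changes frequently, so the CSS selector should be treated as illustrative.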
| 27 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : str = """hf-internal-testing/tiny-random-t5"""
A_ : str = AutoTokenizer.from_pretrained(_lowerCAmelCase )
A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
A_ : Optional[Any] = tokenizer("""This is me""" ,return_tensors="""pt""" )
A_ : str = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
A_ : List[str] = model.generate(**_lowerCAmelCase )
A_ : Optional[int] = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
A_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
A_ : Optional[Any] = model_reloaded.generate(**_lowerCAmelCase )
self.assertTrue(torch.allclose(_lowerCAmelCase ,_lowerCAmelCase ) )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = """hf-internal-testing/tiny-random-t5"""
A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
A_ : str = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_lowerCAmelCase ):
model.save_pretrained(_lowerCAmelCase )
A_ : str = model.reverse_bettertransformer()
model.save_pretrained(_lowerCAmelCase )
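# Hedged usage sketch (added): the round-trip exercised by the tests above, written with the
# standard (non-obfuscated) transformers names, which are an assumption here:
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     model = model.to_bettertransformer()        # swap in BetterTransformer fastpath layers
#     ...generate / fine-tune...
#     model = model.reverse_bettertransformer()   # restore vanilla layers before saving
#     model.save_pretrained("./t5-small-bt-roundtrip")
# Saving while still in BetterTransformer form raises, which is what the second test checks.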
| 708 |
'''simple docstring'''
from ... import PretrainedConfig
__magic_name__ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
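# Hedged usage sketch (added): the public transformers counterpart of the class above is assumed
# to be NezhaConfig (model_type "nezha"); a small configuration built from the defaults in the
# __init__ signature, e.g. max_relative_position defaults to 64.
if __name__ == "__main__":
    from transformers import NezhaConfig

    cfg = NezhaConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(cfg.model_type, cfg.max_relative_position)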
| 27 | 0 |
'''simple docstring'''
import operator
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple = False , lowerCamelCase : List[str] = None):
A_ : str = operator.lt if reverse else operator.gt
A_ : Optional[Any] = solution or []
if not arr:
return solution
A_ : Dict = [arr.pop(0)]
for i, item in enumerate(snake_case_):
if _operator(snake_case_ , sublist[-1]):
sublist.append(snake_case_)
arr.pop(snake_case_)
# merging sublist into solution list
if not solution:
solution.extend(snake_case_)
else:
while sublist:
A_ : Union[str, Any] = sublist.pop(0)
for i, xx in enumerate(snake_case_):
if not _operator(snake_case_ , snake_case_):
solution.insert(snake_case_ , snake_case_)
break
else:
solution.append(snake_case_)
strand_sort(snake_case_ , snake_case_ , snake_case_)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 709 |
'''simple docstring'''
from __future__ import annotations
# Iterative depth-first search: returns the set of vertices reachable from the start vertex.
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
A_ , A_ : List[Any] = set(lowerCamelCase), [start]
while stack:
A_ : Optional[Any] = stack.pop()
explored.add(lowerCamelCase)
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v]):
if adj not in explored:
stack.append(lowerCamelCase)
return explored
__magic_name__ = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
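    # Hedged self-contained sketch (added): the same iterative DFS with readable names, run on a
    # two-component graph to show that only the component containing the start vertex is explored.
    def _dfs(graph: dict, start: str) -> set:
        explored, stack = set(), [start]
        while stack:
            v = stack.pop()
            explored.add(v)
            for adj in reversed(graph[v]):
                if adj not in explored:
                    stack.append(adj)
        return explored

    assert _dfs({"A": ["B"], "B": ["A"], "C": []}, "A") == {"A", "B"}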
| 27 | 0 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _a ( self : List[Any] ,_a : int ,_a : Any ,_a : Dict ):
'''simple docstring'''
A_ : Dict = hf_hub_download(
repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" )
A_ : Dict = VideoClassificationPipeline(model=UpperCamelCase__ ,image_processor=UpperCamelCase__ ,top_k=2 )
A_ : Optional[int] = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def _a ( self : Union[str, Any] ,_a : Any ,_a : Any ):
'''simple docstring'''
for example in examples:
A_ : Tuple = video_classifier(UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ ,[
{"""score""": ANY(UpperCamelCase__ ), """label""": ANY(UpperCamelCase__ )},
{"""score""": ANY(UpperCamelCase__ ), """label""": ANY(UpperCamelCase__ )},
] ,)
@require_torch
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
A_ : Tuple = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} ,crop_size={"""height""": 10, """width""": 10} )
A_ : Dict = pipeline(
"""video-classification""" ,model=UpperCamelCase__ ,feature_extractor=UpperCamelCase__ ,frame_sampling_rate=4 )
A_ : Dict = hf_hub_download(repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" )
A_ : List[str] = video_classifier(UpperCamelCase__ ,top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ,decimals=4 ) ,[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] ,)
A_ : Dict = video_classifier(
[
video_file_path,
video_file_path,
] ,top_k=2 ,)
self.assertEqual(
nested_simplify(UpperCamelCase__ ,decimals=4 ) ,[
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
] ,)
@require_tf
def _a ( self : int ):
'''simple docstring'''
pass
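# Hedged usage sketch (added): outside the test, the same pipeline is typically built as
#     from transformers import pipeline
#     classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#     classifier("archery.mp4", top_k=3)
# returning a list of {"score", "label"} dicts per video; the model id is illustrative, and the
# decord backend must be installed for video decoding, as the decorators above require.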
| 710 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
# Rewrite one key of the original MobileViT checkpoint into the transformers naming scheme.
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
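# Hedged example invocation (added; the script name and checkpoint path are placeholders, the
# flags are the ones declared by the argument parser above):
#     python convert_mobilevit_original_to_pytorch.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit-small \
#         --push_to_hub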
| 27 | 0 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Optional[str] = None):
if version.parse(hfh.__version__).release < version.parse("""0.11.0""").release:
# old versions of hfh don't url-encode the file path
A_ : Union[str, Any] = quote(lowerCamelCase_)
return hfh.hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" , revision=lowerCamelCase_)
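# Hedged usage note (added): the helper above is a thin wrapper over huggingface_hub's public
# hf_hub_url — e.g. hfh.hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet",
# repo_type="dataset", revision=None) — which URL-encodes the filename itself on hfh >= 0.11;
# that is why only older versions are special-cased with quote() above. The repo and file names
# here are illustrative.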
| 711 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
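# Hedged usage sketch (added): the public transformers counterpart of the class above is assumed
# to be CLIPImageProcessor; typical use converts a PIL image into a normalized pixel_values batch:
#     from transformers import CLIPImageProcessor
#     from PIL import Image
#     processor = CLIPImageProcessor()   # defaults above: shortest_edge 224 resize + 224x224 center crop
#     batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#     batch["pixel_values"].shape        # expected: torch.Size([1, 3, 224, 224])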
| 27 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__magic_name__ = datasets.utils.logging.get_logger(__name__)
__magic_name__ = ["names", "prefix"]
__magic_name__ = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
__magic_name__ = ["encoding_errors", "on_bad_lines"]
__magic_name__ = ["date_format"]
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
a_ = ""","""
a_ = None
a_ = """infer"""
a_ = None
a_ = None
a_ = None
a_ = None
a_ = None
a_ = True
a_ = None
a_ = None
a_ = None
a_ = None
a_ = False
a_ = None
a_ = None
a_ = None
a_ = True
a_ = True
a_ = False
a_ = True
a_ = None
a_ = """."""
a_ = None
a_ = """\""""
a_ = 0
a_ = None
a_ = None
a_ = None
a_ = None
a_ = True
a_ = True
a_ = 0
a_ = True
a_ = False
a_ = None
a_ = 10_000
a_ = None
a_ = """strict"""
a_ = """error"""
a_ = None
def _a ( self : Dict ):
'''simple docstring'''
if self.delimiter is not None:
A_ : str = self.delimiter
if self.column_names is not None:
A_ : List[str] = self.column_names
@property
def _a ( self : Any ):
'''simple docstring'''
A_ : str = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,_a ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __lowerCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
a_ = CsvConfig
def _a ( self : List[Any] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : Optional[int] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a ,(str, list, tuple) ):
A_ : Dict = data_files
if isinstance(_a ,_a ):
A_ : Dict = [files]
A_ : Dict = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""files""": files} )]
A_ : List[Any] = []
for split_name, files in data_files.items():
if isinstance(_a ,_a ):
A_ : int = [files]
A_ : Union[str, Any] = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a ,gen_kwargs={"""files""": files} ) )
return splits
def _a ( self : List[Any] ,_a : str ):
'''simple docstring'''
if self.config.features is not None:
A_ : str = self.config.features.arrow_schema
if all(not require_storage_cast(_a ) for feature in self.config.features.values() ):
# cheaper cast
A_ : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=_a )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
A_ : Optional[int] = table_cast(_a ,_a )
return pa_table
def _a ( self : List[str] ,_a : List[str] ):
'''simple docstring'''
A_ : Dict = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
A_ : str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_a ) else object
for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
A_ : Optional[Any] = pd.read_csv(_a ,iterator=_a ,dtype=_a ,**self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_a ):
A_ : Union[str, Any] = pa.Table.from_pandas(_a )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_a )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_a )}: {e}' )
raise
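# Hedged usage note (added): this builder is what backs datasets.load_dataset("csv", ...); the
# BuilderConfig fields above map one-to-one onto pandas.read_csv arguments, so e.g.
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";", skiprows=1)
# forwards sep/skiprows straight through to pandas (file names here are placeholders).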
| 712 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : complex , lowerCamelCase : str = "x" , lowerCamelCase : float = 10**-10 , lowerCamelCase : int = 1 , ):
A_ : int = symbols(lowerCamelCase)
A_ : List[Any] = lambdify(lowerCamelCase , lowerCamelCase)
A_ : List[str] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase))
A_ : str = starting_point
while True:
if diff_function(lowerCamelCase) != 0:
A_ : int = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
lowerCamelCase)
else:
raise ZeroDivisionError("""Could not find root""") from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess) < precision:
return next_guess
A_ : Union[str, Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 0 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] ,_a : str = "cpu" ,_a : str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
A_ : Any = device
A_ : Any = CLIPTokenizerFast.from_pretrained(__A )
A_ : Any = [0.48145466, 0.4578275, 0.40821073]
A_ : Union[str, Any] = [0.26862954, 0.26130258, 0.27577711]
A_ : Tuple = torchvision.transforms.Normalize(self.image_mean ,self.image_std )
A_ : Any = torchvision.transforms.Resize(224 )
A_ : Tuple = torchvision.transforms.CenterCrop(224 )
def _a ( self : Any ,_a : List[Any] ):
'''simple docstring'''
A_ : List[Any] = self.resize(__A )
A_ : Optional[Any] = self.center_crop(__A )
A_ : List[Any] = self.normalize(__A )
return images
def __call__( self : Optional[Any] ,_a : Dict=None ,_a : Optional[Any]=None ,**_a : Tuple ):
'''simple docstring'''
A_ : str = self.tokenizer(text=__A ,**__A )
A_ : Tuple = self.preprocess_img(__A )
A_ : str = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,_a : Union[str, Any]=10 ,_a : Tuple=0.01 ,_a : List[str]=None ,_a : int=None ,_a : Optional[int]=None ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Any=None ,_a : List[str]=False ,_a : Optional[int]=True ,_a : Dict="image" ,_a : int=True ,_a : Optional[Any]=False ,_a : Dict=False ,_a : str=False ,):
'''simple docstring'''
super().__init__()
A_ : List[str] = None
A_ : str = device if device else get_device()
if vqgan:
A_ : int = vqgan
else:
A_ : Union[str, Any] = load_vqgan(self.device ,conf_path=__A ,ckpt_path=__A )
self.vqgan.eval()
if clip:
A_ : Tuple = clip
else:
A_ : List[str] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
A_ : Optional[Any] = ProcessorGradientFlow(device=self.device )
A_ : Dict = iterations
A_ : List[str] = lr
A_ : Union[str, Any] = log
A_ : str = make_grid
A_ : Dict = return_val
A_ : Optional[int] = quantize
A_ : Optional[Any] = self.vqgan.decoder.z_shape
def _a ( self : Tuple ,_a : str=None ,_a : int=None ,_a : Union[str, Any]=5 ,_a : List[str]=True ):
'''simple docstring'''
A_ : int = []
if output_path is None:
A_ : Optional[int] = """./animation.gif"""
if input_path is None:
A_ : Union[str, Any] = self.save_path
A_ : Tuple = sorted(glob(input_path + """/*""" ) )
if not len(__A ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(__A ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
A_ : Dict = total_duration / len(__A )
A_ : List[str] = [frame_duration] * len(__A )
if extend_frames:
A_ : Optional[int] = 1.5
A_ : Optional[Any] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(__A ) )
imageio.mimsave(__A ,__A ,duration=__A )
print(f'gif saved to {output_path}' )
def _a ( self : Dict ,_a : int=None ,_a : Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
A_ : Optional[Any] = preprocess(Image.open(__A ) ,target_image_size=256 ).to(self.device )
A_ : Union[str, Any] = preprocess_vqgan(__A )
A_ , *A_ : Dict = self.vqgan.encode(__A )
return z
def _a ( self : Any ,_a : List[str] ):
'''simple docstring'''
A_ : List[Any] = self.latent.detach().requires_grad_()
A_ : Optional[int] = base_latent + transform_vector
if self.quantize:
A_ , *A_ : Dict = self.vqgan.quantize(__A )
else:
A_ : Tuple = trans_latent
return self.vqgan.decode(__A )
def _a ( self : Any ,_a : Optional[Any] ,_a : Tuple ,_a : Optional[Any]=None ):
'''simple docstring'''
A_ : List[Any] = self.clip_preprocessor(text=__A ,images=__A ,return_tensors="""pt""" ,padding=__A )
A_ : str = self.clip(**__A )
A_ : Union[str, Any] = clip_outputs.logits_per_image
if weights is not None:
A_ : Any = similarity_logits * weights
return similarity_logits.sum()
def _a ( self : Dict ,_a : Optional[Any] ,_a : Dict ,_a : List[Any] ):
'''simple docstring'''
A_ : str = self._get_clip_similarity(pos_prompts["""prompts"""] ,__A ,weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
A_ : int = self._get_clip_similarity(neg_prompts["""prompts"""] ,__A ,weights=neg_prompts["""weights"""] )
else:
A_ : Tuple = torch.tensor([1] ,device=self.device )
A_ : List[str] = -torch.log(__A ) + torch.log(__A )
return loss
def _a ( self : Dict ,_a : str ,_a : int ,_a : List[str] ):
'''simple docstring'''
A_ : List[Any] = torch.randn_like(self.latent ,requires_grad=__A ,device=self.device )
A_ : List[str] = torch.optim.Adam([vector] ,lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A_ : Tuple = self._add_vector(__A )
A_ : Union[str, Any] = loop_post_process(__A )
A_ : List[Any] = self._get_CLIP_loss(__A ,__A ,__A )
print("""CLIP loss""" ,__A )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=__A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _a ( self : List[str] ,_a : List[Any] ,_a : Optional[Any] ,_a : Any ):
'''simple docstring'''
wandb.init(reinit=__A ,project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
A_ : Any = Image.open(__A )
A_ : Optional[int] = image.resize((256, 256) )
wandb.log("""Original Image""" ,wandb.Image(__A ) )
def _a ( self : Union[str, Any] ,_a : Optional[Any] ):
'''simple docstring'''
if not prompts:
return []
A_ : Dict = []
A_ : str = []
if isinstance(__A ,__A ):
A_ : str = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(__A ,(tuple, list) ):
A_ : Tuple = prompt[0]
A_ : Tuple = float(prompt[1] )
elif ":" in prompt:
A_ , A_ : Dict = prompt.split(""":""" )
A_ : Union[str, Any] = float(__A )
else:
A_ : Tuple = prompt
A_ : Union[str, Any] = 1.0
processed_prompts.append(__A )
weights.append(__A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__A ,device=self.device ),
}
def _a ( self : List[str] ,_a : str ,_a : Any=None ,_a : List[Any]=None ,_a : Any=True ,_a : str=False ,_a : int=True ,_a : Tuple=True ,_a : Union[str, Any]=None ,):
'''simple docstring'''
if image_path:
A_ : List[Any] = self._get_latent(__A )
else:
A_ : Tuple = torch.randn(self.latent_dim ,device=self.device )
if self.log:
self._init_logging(__A ,__A ,__A )
assert pos_prompts, "You must provide at least one positive prompt."
A_ : List[Any] = self.process_prompts(__A )
A_ : str = self.process_prompts(__A )
if save_final and save_path is None:
A_ : List[str] = os.path.join("""./outputs/""" ,"""_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(__A ):
os.makedirs(__A )
else:
A_ : int = save_path + """_""" + get_timestamp()
os.makedirs(__A )
A_ : Optional[int] = save_path
A_ : List[str] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(__A ) )
A_ : List[str] = loop_post_process(__A )
for iter, transformed_img in enumerate(self._optimize_CLIP(__A ,__A ,__A ) ):
if show_intermediate:
show_pil(__A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path ,f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(__A )} )
if show_final:
show_pil(__A )
if save_final:
transformed_img.save(os.path.join(self.save_path ,f'iter_{iter:03d}_final.png' ) )
| 714 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : Dict ):
'''simple docstring'''
super().__init__()
A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a )
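        # Keep only the convolutional trunk of the ResNet: drop the final average-pooling and fully-connected classification layers.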
A_ : int = list(model.children() )[:-2]
A_ : int = nn.Sequential(*_a )
A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = [json.loads(_a ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
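        # Split off the image start/end special tokens, truncate the sentence, prepare the label vector, and load + transform the image.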
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( lowerCamelCase : str):
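    # Pad the tokenized sentences to the longest sequence in the batch, build the matching attention mask, then stack images, labels and image start/end tokens.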
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( lowerCamelCase : int):
if num <= 0:
A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(lowerCamelCase)
A_ : str = [True] * (num + 1)
A_ : Tuple = []
A_ : str = 2
A_ : Any = int(math.sqrt(lowerCamelCase))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCamelCase)
            # Set multiples of start to False (they are not prime)
for i in range(start * start , num + 1 , lowerCamelCase):
if sieve[i] is True:
A_ : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
prime.append(lowerCamelCase)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 27 | 0 |
'''simple docstring'''
__magic_name__ = range(2, 20 + 1)
__magic_name__ = [10**k for k in range(ks[-1] + 1)]
__magic_name__ = {}
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any):
A_ : Any = sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase)))
A_ : List[str] = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase) , lowerCamelCase)))
A_ : int = 0, 0
A_ : List[Any] = n - i
A_ : Optional[int] = memo.get(lowerCamelCase)
if sub_memo is not None:
A_ : List[str] = sub_memo.get(lowerCamelCase)
if jumps is not None and len(lowerCamelCase) > 0:
# find and make the largest jump without going over
A_ : List[str] = -1
for _k in range(len(lowerCamelCase) - 1 , -1 , -1):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ : Union[str, Any] = _k
break
if max_jump >= 0:
A_ : List[str] = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ : List[Any] = diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase))):
A_ : Tuple = divmod(lowerCamelCase , 10)
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase)
else:
A_ : Optional[int] = []
else:
A_ : List[Any] = {c: []}
A_ : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ : Optional[int] = next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase)
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ : Union[str, Any] = compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase)
diff += _diff
dn += terms_jumped
A_ : int = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ : Union[str, Any] = 0
while j < len(lowerCamelCase):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k))
return (diff, dn)
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple , lowerCamelCase : int):
if i >= n:
return 0, i
if k > len(lowerCamelCase):
a_i.extend([0 for _ in range(k - len(lowerCamelCase))])
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ : Tuple = i
A_ : Optional[int] = 0, 0, 0
for j in range(len(lowerCamelCase)):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ : Tuple = ds_c + ds_b
diff += addend
A_ : int = 0
for j in range(lowerCamelCase):
A_ : Tuple = a_i[j] + addend
A_ : Union[str, Any] = divmod(lowerCamelCase , 10)
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase)
return diff, i - start_i
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict):
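    # Add an addend into the little-endian digit list starting at a given position, propagating carries into higher digits.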
for j in range(lowerCamelCase , len(lowerCamelCase)):
A_ : Any = digits[j] + addend
if s >= 10:
A_ : Tuple = divmod(lowerCamelCase , 10)
A_ : Tuple = addend // 10 + quotient
else:
A_ : Dict = s
A_ : List[str] = addend // 10
if addend == 0:
break
while addend > 0:
A_ : Optional[int] = divmod(lowerCamelCase , 10)
digits.append(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : int = 10**15):
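    # Returns the n-th term of the sequence a(1) = 1, a(i+1) = a(i) + digitsum(a(i)), reusing the memoized
    # jumps above; the digits are kept little-endian, so the answer is reassembled from them at the end.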
A_ : Optional[int] = [1]
A_ : List[str] = 1
A_ : Tuple = 0
while True:
A_ : Tuple = next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase)
dn += terms_jumped
if dn == n - i:
break
A_ : Union[str, Any] = 0
for j in range(len(lowerCamelCase)):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 716 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.inta:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
A_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase)
A_ : Tuple = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
        # Set the offset_mapping entries that are not part of the context to None, so it's easy to determine
        # whether a token position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
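        # bytes required for one TensorRT binding: number of elements in its shape times the element size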
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
        # predictions and labels need to be padded so they can be gathered across processes
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """xlm-roberta"""
def __init__( self : List[Any] ,_a : List[str]=30522 ,_a : Optional[Any]=768 ,_a : str=12 ,_a : Dict=12 ,_a : int=3072 ,_a : Union[str, Any]="gelu" ,_a : Tuple=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : int=2 ,_a : Any=0.02 ,_a : Optional[int]=1e-12 ,_a : Union[str, Any]=1 ,_a : Dict=0 ,_a : Optional[Any]=2 ,_a : Dict="absolute" ,_a : List[str]=True ,_a : List[str]=None ,**_a : Any ,):
'''simple docstring'''
super().__init__(pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,**__A )
A_ : Optional[Any] = vocab_size
A_ : Tuple = hidden_size
A_ : str = num_hidden_layers
A_ : str = num_attention_heads
A_ : int = hidden_act
A_ : Any = intermediate_size
A_ : int = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[int] = type_vocab_size
A_ : List[Any] = initializer_range
A_ : str = layer_norm_eps
A_ : List[Any] = position_embedding_type
A_ : int = use_cache
A_ : Any = classifier_dropout
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 27 | 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True])
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : int):
A_ : Union[str, Any] = tmp_path / """cache"""
A_ : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[Any] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Tuple):
A_ : Optional[Any] = tmp_path / """cache"""
A_ : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Optional[Any] = features.copy() if features else default_expected_features
A_ : int = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
A_ : Tuple = ParquetDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train"""), """train""", """test"""])
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = tmp_path / """cache"""
A_ : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Optional[int] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list])
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : List[Any]):
if issubclass(_lowerCamelCase , _lowerCamelCase):
A_ : List[Any] = parquet_path
elif issubclass(_lowerCamelCase , _lowerCamelCase):
A_ : int = [parquet_path]
A_ : Union[str, Any] = tmp_path / """cache"""
A_ : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Union[str, Any] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]=("train",)):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
for split in splits:
A_ : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True])
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : int):
A_ : Any = tmp_path / """cache"""
A_ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Any = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : int):
A_ : Tuple = tmp_path / """cache"""
A_ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Optional[int] = features.copy() if features else default_expected_features
A_ : str = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
A_ : List[str] = ParquetDatasetReader({"""train""": parquet_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train"""), """train""", """test"""])
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : List[str]):
if split:
A_ : List[Any] = {split: parquet_path}
else:
A_ : List[str] = """train"""
A_ : Optional[Any] = {"""train""": parquet_path, """test""": parquet_path}
A_ : List[str] = tmp_path / """cache"""
A_ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : List[str] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : str):
A_ : List[str] = ParquetDatasetWriter(_lowerCamelCase , tmp_path / """foo.parquet""")
assert writer.write() > 0
A_ : Any = pq.ParquetFile(tmp_path / """foo.parquet""")
A_ : Dict = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Dict):
A_ : Optional[int] = str(shared_datadir / """test_image_rgb.jpg""")
A_ : Optional[Any] = {"""image""": [image_path]}
A_ : List[str] = Features({"""image""": Image()})
A_ : int = Dataset.from_dict(_lowerCamelCase , features=_lowerCamelCase)
A_ : Tuple = ParquetDatasetWriter(_lowerCamelCase , tmp_path / """foo.parquet""")
assert writer.write() > 0
A_ : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet"""))
assert dataset.features == reloaded_dataset.features
A_ : List[str] = ParquetDatasetReader(str(tmp_path / """foo.parquet""") , streaming=_lowerCamelCase).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""")}), None),
(Features({"""image""": Image(), """foo""": Value("""int32""")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
assert get_writer_batch_size(_lowerCamelCase) == expected
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
| 27 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = '''biogpt'''
def __init__( self : Optional[int] ,_a : str=42384 ,_a : int=1024 ,_a : str=24 ,_a : Tuple=16 ,_a : Optional[int]=4096 ,_a : str="gelu" ,_a : int=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=1024 ,_a : Optional[int]=0.02 ,_a : List[Any]=1e-12 ,_a : Tuple=True ,_a : Tuple=True ,_a : Dict=0.0 ,_a : int=0.0 ,_a : Union[str, Any]=1 ,_a : Any=0 ,_a : Dict=2 ,**_a : int ,):
'''simple docstring'''
A_ : Optional[int] = vocab_size
A_ : List[str] = max_position_embeddings
A_ : Optional[int] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : Dict = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = initializer_range
A_ : Optional[int] = layer_norm_eps
A_ : Tuple = scale_embedding
A_ : Tuple = use_cache
A_ : Dict = layerdrop
A_ : Optional[Any] = activation_dropout
super().__init__(pad_token_id=A__ ,bos_token_id=A__ ,eos_token_id=A__ ,**A__ )
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
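        # Power spectrogram with a Hann window, projected onto the given mel filter bank and converted to dB; transposed so time is the leading axis.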
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
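        # Sample one chunk each from the front, middle and back thirds of the spectrogram and stack them with a
        # globally downsampled copy, producing the 4-channel "fusion" feature.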
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
                A_ : Tuple = max_length // self.hop_length + 1 # the +1 is related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
        # only use repeat as a new possible value for padding. The audio is repeated before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
| 27 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : str ,_a : str=13 ,_a : Optional[int]=7 ,_a : str=True ,_a : Union[str, Any]=True ,_a : Tuple=True ,_a : int=True ,_a : List[Any]=99 ,_a : int=32 ,_a : Any=5 ,_a : Dict=4 ,_a : Union[str, Any]=37 ,_a : str="gelu" ,_a : str=0.1 ,_a : List[str]=0.1 ,_a : Optional[Any]=512 ,_a : Dict=16 ,_a : List[Any]=2 ,_a : List[Any]=0.02 ,_a : List[str]=4 ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : Tuple = batch_size
A_ : List[str] = seq_length
A_ : Union[str, Any] = is_training
A_ : Dict = use_attention_mask
A_ : int = use_token_type_ids
A_ : Optional[Any] = use_labels
A_ : Optional[int] = vocab_size
A_ : Dict = hidden_size
A_ : Any = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : str = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : List[Any] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Optional[Any] = type_sequence_label_size
A_ : str = initializer_range
A_ : Optional[int] = num_choices
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : Union[str, Any] = None
if self.use_attention_mask:
A_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Optional[Any] = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=_a ,)
return config, input_ids, attention_mask
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Dict = self.prepare_config_and_inputs()
A_ , A_ , A_ : Optional[Any] = config_and_inputs
A_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = FlaxDistilBertModelTester(self )
@slow
def _a ( self : Dict ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
A_ : Dict = model_class_name.from_pretrained("""distilbert-base-uncased""" )
A_ : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : Dict ):
'''simple docstring'''
A_ : str = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
A_ : Union[str, Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A_ : List[str] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A_ : List[str] = model(_a ,attention_mask=_a )[0]
A_ : Dict = (1, 11, 768)
self.assertEqual(output.shape ,_a )
A_ : int = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,_a ,atol=1e-4 ) )
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ : str = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyImgaImgPipeline
a_ = ["prompt", "image_embeds", "negative_image_embeds", "image"]
a_ = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
a_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a_ = False
@property
def _a ( self : str ):
'''simple docstring'''
return 32
@property
def _a ( self : str ):
'''simple docstring'''
return 32
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Dict ):
'''simple docstring'''
return 100
@property
def _a ( self : str ):
'''simple docstring'''
A_ : int = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _a ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1005 ,)
A_ : Any = MultilingualCLIP(UpperCamelCase__ )
A_ : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : str = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def _a ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[Any] = self.dummy_text_encoder
A_ : List[Any] = self.dummy_tokenizer
A_ : Optional[Any] = self.dummy_unet
A_ : Tuple = self.dummy_movq
A_ : List[str] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A_ : int = DDIMScheduler(**UpperCamelCase__ )
A_ : List[str] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : List[str] ,_a : Union[str, Any] ,_a : int=0 ):
'''simple docstring'''
A_ : List[str] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(UpperCamelCase__ )
# create init_image
A_ : Tuple = floats_tensor((1, 3, 64, 64) ,rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : str = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ : Union[str, Any] = torch.manual_seed(UpperCamelCase__ )
else:
A_ : Any = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Union[str, Any] = self.pipeline_class(**UpperCamelCase__ )
A_ : Tuple = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ : Tuple = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ : List[Any] = output.images
A_ : Dict = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) ,return_dict=UpperCamelCase__ ,)[0]
A_ : Optional[int] = image[0, -3:, -3:, -1]
A_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Optional[int] = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : int ):
'''simple docstring'''
A_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
A_ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A_ : Any = """A red cartoon frog, 4k"""
A_ : Optional[int] = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
A_ : List[str] = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" ,torch_dtype=torch.floataa )
A_ : List[str] = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
A_ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ , A_ : Union[str, Any] = pipe_prior(
UpperCamelCase__ ,generator=UpperCamelCase__ ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : Dict = pipeline(
UpperCamelCase__ ,image=UpperCamelCase__ ,image_embeds=UpperCamelCase__ ,negative_image_embeds=UpperCamelCase__ ,generator=UpperCamelCase__ ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 ,output_type="""np""" ,)
A_ : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase__ ,UpperCamelCase__ )
| 721 |
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
    return baseaa.aaaencode(lowerCamelCase.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
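# A minimal round-trip sketch of the two helpers above (hypothetical names, since both
# helpers share one identifier here; this assumes `baseaa.aaaencode`/`baseaa.aaadecode`
# mirror a standard Ascii85 encode/decode pair):
#     encoded = ascii85_encode_string("some text")   # -> Ascii85 bytes
#     assert ascii85_decode_string(encoded) == "some text"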
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__magic_name__ = """CompVis/stable-diffusion-v1-1"""
__magic_name__ = """CompVis/stable-diffusion-v1-2"""
__magic_name__ = """CompVis/stable-diffusion-v1-3"""
__magic_name__ = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( snake_case_ ):
'''simple docstring'''
def __init__( self : Tuple ,_a : List[str] ,_a : Optional[Any] ,_a : int ,_a : Any ,_a : Union[str, Any] ,_a : Optional[Any] ,_a : List[str] ,_a : Any = True ,):
'''simple docstring'''
        super().__init__()
A_ : str = StableDiffusionPipeline.from_pretrained(_a )
A_ : List[str] = StableDiffusionPipeline.from_pretrained(_a )
A_ : List[Any] = StableDiffusionPipeline.from_pretrained(_a )
A_ : int = StableDiffusionPipeline(
vae=_a ,text_encoder=_a ,tokenizer=_a ,unet=_a ,scheduler=_a ,safety_checker=_a ,feature_extractor=_a ,requires_safety_checker=_a ,)
self.register_modules(pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea )
@property
def _a ( self : str ):
'''simple docstring'''
        return {k: getattr(self ,k ) for k in self.config.keys() if not k.startswith("""_""" )}
def _a ( self : List[str] ,_a : Optional[int] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _a ( self : List[Any] ):
'''simple docstring'''
self.enable_attention_slicing(_a )
@torch.no_grad()
def _a ( self : Optional[int] ,_a : int ,_a : int = 512 ,_a : Union[str, Any] = 512 ,_a : List[Any] = 50 ,_a : int = 7.5 ,_a : Any = None ,_a : Optional[Any] = 1 ,_a : Dict = 0.0 ,_a : List[Any] = None ,_a : List[Any] = None ,_a : str = "pil" ,_a : List[Any] = True ,_a : Optional[Any] = None ,_a : int = 1 ,**_a : List[Any] ,):
'''simple docstring'''
return self.pipea(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
@torch.no_grad()
def _a ( self : Tuple ,_a : Union[str, Any] ,_a : Optional[int] = 512 ,_a : Union[str, Any] = 512 ,_a : Dict = 50 ,_a : Optional[Any] = 7.5 ,_a : Tuple = None ,_a : List[Any] = 1 ,_a : List[str] = 0.0 ,_a : str = None ,_a : Union[str, Any] = None ,_a : Optional[Any] = "pil" ,_a : Tuple = True ,_a : Optional[int] = None ,_a : Union[str, Any] = 1 ,**_a : str ,):
'''simple docstring'''
return self.pipea(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
@torch.no_grad()
def _a ( self : Optional[int] ,_a : str ,_a : Union[str, Any] = 512 ,_a : List[str] = 512 ,_a : Dict = 50 ,_a : List[Any] = 7.5 ,_a : Optional[Any] = None ,_a : Any = 1 ,_a : List[Any] = 0.0 ,_a : Tuple = None ,_a : Dict = None ,_a : Tuple = "pil" ,_a : List[str] = True ,_a : List[str] = None ,_a : List[str] = 1 ,**_a : List[str] ,):
'''simple docstring'''
return self.pipea(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
@torch.no_grad()
def _a ( self : int ,_a : Optional[int] ,_a : Optional[int] = 512 ,_a : str = 512 ,_a : Optional[Any] = 50 ,_a : List[Any] = 7.5 ,_a : Union[str, Any] = None ,_a : Any = 1 ,_a : Optional[Any] = 0.0 ,_a : int = None ,_a : int = None ,_a : Optional[Any] = "pil" ,_a : Optional[Any] = True ,_a : Tuple = None ,_a : Union[str, Any] = 1 ,**_a : str ,):
'''simple docstring'''
return self.pipea(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
@torch.no_grad()
def _a ( self : Optional[Any] ,_a : Any ,_a : Optional[Any] = 512 ,_a : Optional[Any] = 512 ,_a : List[str] = 50 ,_a : int = 7.5 ,_a : Optional[Any] = None ,_a : Optional[int] = 1 ,_a : int = 0.0 ,_a : Any = None ,_a : int = None ,_a : Optional[Any] = "pil" ,_a : str = True ,_a : List[str] = None ,_a : Any = 1 ,**_a : List[str] ,):
'''simple docstring'''
A_ : str = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(_a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
# Get first result from Stable Diffusion Checkpoint v1.1
A_ : Optional[int] = self.textaimg_sda_a(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
# Get first result from Stable Diffusion Checkpoint v1.2
A_ : str = self.textaimg_sda_a(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
# Get first result from Stable Diffusion Checkpoint v1.3
A_ : int = self.textaimg_sda_a(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
# Get first result from Stable Diffusion Checkpoint v1.4
A_ : Tuple = self.textaimg_sda_a(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
    # word like '180' or '身高' or '神'
    for char in lowerCamelCase:
        A_ : Optional[Any] = ord(char)
        if not _is_chinese_char(A_):
            return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
    A_ : Any = set()
    for token in lowerCamelCase:
        if len(token) > 1 and is_chinese(token):
            A_.add(token)
    return list(A_)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
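    # Greedy longest-match over the BERT tokens: wherever a span of characters matches a
    # word from the provided Chinese word set, the non-initial pieces of that span are
    # prefixed with "##" so whole-word masking can treat the whole word as one unit.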
if not chinese_word_set:
return bert_tokens
A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
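    # For every input line, collect the positions of the "##"-prefixed Chinese sub-tokens;
    # these reference ids mark which pieces belong to the same word for whole-word masking.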
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
        # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
        help='file to process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class __lowerCAmelCase ( _UpperCAmelCase ):
'''simple docstring'''
a_ = """openai-gpt"""
a_ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any ,_a : Dict=40478 ,_a : Any=512 ,_a : Optional[Any]=768 ,_a : Optional[Any]=12 ,_a : List[str]=12 ,_a : Dict="gelu" ,_a : Optional[int]=0.1 ,_a : Tuple=0.1 ,_a : str=0.1 ,_a : Optional[int]=1e-5 ,_a : Optional[Any]=0.02 ,_a : List[str]="cls_index" ,_a : Union[str, Any]=True ,_a : Optional[int]=None ,_a : Dict=True ,_a : Union[str, Any]=0.1 ,**_a : str ,):
'''simple docstring'''
A_ : List[str] = vocab_size
A_ : int = n_positions
A_ : int = n_embd
A_ : Any = n_layer
A_ : List[str] = n_head
A_ : Dict = afn
A_ : int = resid_pdrop
A_ : str = embd_pdrop
A_ : str = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : List[str] = initializer_range
A_ : Tuple = summary_type
A_ : Optional[int] = summary_use_proj
A_ : Optional[Any] = summary_activation
A_ : List[str] = summary_first_dropout
A_ : Tuple = summary_proj_to_labels
        super().__init__(**_a )
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
| 27 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """attention_mask"""]
def __init__( self : Dict ,_a : List[Any]=80 ,_a : Tuple=16000 ,_a : List[str]=80 ,_a : int=0.0 ,_a : str=True ,_a : List[Any]=True ,_a : Any=True ,**_a : int ,):
'''simple docstring'''
super().__init__(feature_size=_a ,sampling_rate=_a ,padding_value=_a ,**_a )
A_ : Optional[int] = num_mel_bins
A_ : Union[str, Any] = do_ceptral_normalize
A_ : int = normalize_means
A_ : Tuple = normalize_vars
A_ : Tuple = True
def _a ( self : List[str] ,_a : np.ndarray ,):
'''simple docstring'''
A_ : str = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
A_ : Union[str, Any] = torch.from_numpy(_a ).unsqueeze(0 )
A_ : Optional[int] = ta_kaldi.fbank(_a ,num_mel_bins=self.num_mel_bins ,sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _a ( _a : np.ndarray ,_a : int ,_a : Optional[bool] = True ,_a : Optional[bool] = True ,_a : float = 0.0 ,):
'''simple docstring'''
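        # Utterance-level CMVN: normalize the mean (and optionally the variance) over the
        # valid frames only, then reset any padded frames to the padding value.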
if normalize_means:
A_ : int = x[:input_length].mean(axis=0 )
A_ : Dict = np.subtract(_a ,_a )
if normalize_vars:
A_ : Dict = x[:input_length].std(axis=0 )
A_ : List[Any] = np.divide(_a ,_a )
if input_length < x.shape[0]:
A_ : Optional[Any] = padding_value
# make sure array is in float32
A_ : Any = x.astype(np.floataa )
return x
def _a ( self : Dict ,_a : List[np.ndarray] ,_a : Optional[np.ndarray] = None ):
'''simple docstring'''
A_ : Dict = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(_a ,_a ,self.normalize_means ,self.normalize_vars ,self.padding_value )
for x, n in zip(_a ,_a )
]
def __call__( self : Any ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Optional[int] = None ,_a : bool = False ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[int] = None ,_a : Optional[bool] = None ,**_a : Any ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Dict = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : Optional[int] = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : Dict = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [raw_speech]
# extract fbank features
A_ : Tuple = [self._extract_fbank_features(_a ) for waveform in raw_speech]
# convert into correct format for padding
A_ : Tuple = BatchFeature({"""input_features""": features} )
A_ : Tuple = self.pad(
_a ,padding=_a ,max_length=_a ,truncation=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,**_a ,)
# make sure list is in array format
A_ : Optional[int] = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,_a ):
A_ : int = [np.asarray(_a ,dtype=np.floataa ) for feature in input_features]
A_ : int = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
A_ : str = [np.asarray(_a ,dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
A_ : Tuple = (
np.array(_a ,dtype=np.intaa )
if self._get_padding_strategies(_a ,max_length=_a ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A_ : int = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=_a )
if return_tensors is not None:
A_ : int = padded_inputs.convert_to_tensors(_a )
return padded_inputs
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
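    # Tokenize a single text line to at most the requested maximum length, optionally
    # padding up to that length, and return framework tensors ("pt" by default).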
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
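    # Drop the columns that contain only padding across the whole batch so the model never
    # attends over them; the attention mask, when provided, is trimmed the same way.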
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
    def white_space_fix(lowerCamelCase : List[Any]):
        return " ".join(lowerCamelCase.split())
    def remove_punc(lowerCamelCase : Union[str, Any]):
        A_ : Optional[int] = set(string.punctuation)
        return "".join(ch for ch in lowerCamelCase if ch not in A_)
    def lower(lowerCamelCase : List[str]):
        return lowerCamelCase.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__magic_name__ = logging.getLogger(__name__)
class __lowerCAmelCase ( __lowercase ):
'''simple docstring'''
a_ = """summarization"""
a_ = ["""loss"""]
a_ = ROUGE_KEYS
a_ = """rouge2"""
def __init__( self : str ,_a : int ,**_a : Optional[int] ):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : Any = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(_A ,num_labels=_A ,mode=self.mode ,**_A )
use_task_specific_params(self.model ,"""summarization""" )
save_git_info(self.hparams.output_dir )
A_ : Optional[Any] = Path(self.output_dir ) / """metrics.json"""
A_ : Union[str, Any] = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams ,self.hparams_save_path )
A_ : Any = 0
A_ : Dict = defaultdict(_A )
A_ : int = self.config.model_type
A_ : Any = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
A_ : Union[str, Any] = {
"""data_dir""": self.hparams.data_dir,
"""max_source_length""": self.hparams.max_source_length,
"""prefix""": self.model.config.prefix or """""",
}
A_ : Union[str, Any] = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Optional[int] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : Dict = get_git_info()["""repo_sha"""]
A_ : Optional[int] = hparams.num_workers
A_ : Optional[Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,_A ):
A_ : Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Union[str, Any] = self.decoder_start_token_id
A_ : Any = (
SeqaSeqDataset if hasattr(self.tokenizer ,"""prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
A_ : List[Any] = False
A_ : List[str] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : List[Any] = self.hparams.eval_max_gen_length
else:
A_ : Optional[int] = self.model.config.max_length
A_ : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _a ( self : str ,_a : Dict[str, torch.Tensor] ):
'''simple docstring'''
A_ : List[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(_A ,Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / """tok_batch.json""" )
A_ : Tuple = True
return readable_batch
def _a ( self : Tuple ,_a : Tuple ,**_a : Dict ):
'''simple docstring'''
return self.model(_A ,**_A )
def _a ( self : Any ,_a : List[int] ):
'''simple docstring'''
A_ : List[str] = self.tokenizer.batch_decode(
_A ,skip_special_tokens=_A ,clean_up_tokenization_spaces=_A )
return lmap(str.strip ,_A )
def _a ( self : List[str] ,_a : dict ):
'''simple docstring'''
A_ : Any = self.tokenizer.pad_token_id
A_ , A_ : Tuple = batch["""input_ids"""], batch["""attention_mask"""]
A_ : Any = batch["""labels"""]
if isinstance(self.model ,_A ):
A_ : Optional[int] = self.model._shift_right(_A )
else:
A_ : Any = shift_tokens_right(_A ,_A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(_A )
A_ : int = self(_A ,attention_mask=_A ,decoder_input_ids=_A ,use_cache=_A )
A_ : List[Any] = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=_A )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) )
else:
A_ : Union[str, Any] = nn.functional.log_softmax(_A ,dim=-1 )
A_ , A_ : Union[str, Any] = label_smoothed_nll_loss(
_A ,_A ,self.hparams.label_smoothing ,ignore_index=_A )
return (loss,)
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.pad_token_id
def _a ( self : Optional[int] ,_a : str ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self._step(_A )
A_ : List[str] = dict(zip(self.loss_names ,_A ) )
# tokens per batch
A_ : int = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
A_ : Tuple = batch["""input_ids"""].shape[0]
A_ : Any = batch["""input_ids"""].eq(self.pad ).sum()
A_ : str = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _a ( self : str ,_a : List[str] ,_a : Tuple ):
'''simple docstring'''
return self._generative_step(_A )
def _a ( self : Dict ,_a : Optional[Any] ,_a : Optional[int]="val" ):
'''simple docstring'''
self.step_count += 1
A_ : List[str] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : List[str] = losses["""loss"""]
A_ : Optional[int] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
A_ : int = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : Union[str, Any] = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
A_ : Union[str, Any] = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
A_ : List[Any] = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
A_ : List[str] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def _a ( self : Any ,_a : str ,_a : Dict ):
'''simple docstring'''
return calculate_rouge(_A ,_A )
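    # Evaluation step with generation: decode predictions and references, time generation, and compute text metrics plus the loss.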
def _a ( self : Tuple ,_a : dict ):
'''simple docstring'''
A_ : Optional[Any] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : int = self.model.generate(
batch["""input_ids"""] ,attention_mask=batch["""attention_mask"""] ,use_cache=_A ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
A_ : List[Any] = (time.time() - ta) / batch["""input_ids"""].shape[0]
A_ : Union[str, Any] = self.ids_to_clean_text(_A )
A_ : List[str] = self.ids_to_clean_text(batch["""labels"""] )
A_ : Optional[int] = self._step(_A )
A_ : Dict = dict(zip(self.loss_names ,_A ) )
A_ : str = self.calc_generative_metrics(_A ,_A )
A_ : Tuple = np.mean(lmap(_A ,_A ) )
base_metrics.update(gen_time=_A ,gen_len=_A ,preds=_A ,target=_A ,**_A )
return base_metrics
def _a ( self : int ,_a : Tuple ,_a : Tuple ):
'''simple docstring'''
return self._generative_step(_A )
def _a ( self : Any ,_a : List[Any] ):
'''simple docstring'''
return self.validation_epoch_end(_A ,prefix="""test""" )
def _a ( self : Any ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = self.n_obs[type_path]
A_ : Any = self.target_lens[type_path]
A_ : List[Any] = self.dataset_class(
self.tokenizer ,type_path=_A ,n_obs=_A ,max_target_length=_A ,**self.dataset_kwargs ,)
return dataset
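    # Build a DataLoader; the training split may use a sortish sampler or a dynamic token-budget batch sampler.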
def _a ( self : Any ,_a : str ,_a : int ,_a : bool = False ):
'''simple docstring'''
A_ : Union[str, Any] = self.get_dataset(_A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : Dict = dataset.make_sortish_sampler(_A ,distributed=self.hparams.gpus > 1 )
return DataLoader(
_A ,batch_size=_A ,collate_fn=dataset.collate_fn ,shuffle=_A ,num_workers=self.num_workers ,sampler=_A ,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : List[str] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 )
return DataLoader(
_A ,batch_sampler=_A ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
else:
return DataLoader(
_A ,batch_size=_A ,collate_fn=dataset.collate_fn ,shuffle=_A ,num_workers=self.num_workers ,sampler=_A ,)
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Dict = self.get_dataloader("""train""" ,batch_size=self.hparams.train_batch_size ,shuffle=_A )
return dataloader
def _a ( self : int ):
'''simple docstring'''
return self.get_dataloader("""val""" ,batch_size=self.hparams.eval_batch_size )
def _a ( self : Optional[int] ):
'''simple docstring'''
return self.get_dataloader("""test""" ,batch_size=self.hparams.eval_batch_size )
@staticmethod
def _a ( _a : int ,_a : List[Any] ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(_A ,_A )
add_generic_args(_A ,_A )
parser.add_argument(
"""--max_source_length""" ,default=1024 ,type=_A ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--max_target_length""" ,default=56 ,type=_A ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--val_max_target_length""" ,default=142 ,type=_A ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--test_max_target_length""" ,default=142 ,type=_A ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument("""--freeze_encoder""" ,action="""store_true""" )
parser.add_argument("""--freeze_embeds""" ,action="""store_true""" )
parser.add_argument("""--sortish_sampler""" ,action="""store_true""" ,default=_A )
parser.add_argument("""--overwrite_output_dir""" ,action="""store_true""" ,default=_A )
parser.add_argument("""--max_tokens_per_batch""" ,type=_A ,default=_A )
parser.add_argument("""--logger_name""" ,type=_A ,choices=["""default""", """wandb""", """wandb_shared"""] ,default="""default""" )
parser.add_argument("""--n_train""" ,type=_A ,default=-1 ,required=_A ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" ,type=_A ,default=500 ,required=_A ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" ,type=_A ,default=-1 ,required=_A ,help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" ,type=_A ,default="""summarization""" ,required=_A ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" ,type=_A ,default=0.0 ,required=_A )
parser.add_argument("""--src_lang""" ,type=_A ,default="""""" ,required=_A )
parser.add_argument("""--tgt_lang""" ,type=_A ,default="""""" ,required=_A )
parser.add_argument("""--eval_beams""" ,type=_A ,default=_A ,required=_A )
parser.add_argument(
"""--val_metric""" ,type=_A ,default=_A ,required=_A ,choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" ,type=_A ,default=_A ,help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" ,type=_A ,default=1 ,required=_A ,help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" ,type=_A ,default=-1 ,required=_A ,help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) ,)
return parser
class __lowerCAmelCase ( __lowercase ):
'''simple docstring'''
a_ = """translation"""
a_ = ["""loss"""]
a_ = ["""bleu"""]
a_ = """bleu"""
def __init__( self : Union[str, Any] ,_a : Tuple ,**_a : List[str] ):
'''simple docstring'''
super().__init__(_A ,**_A )
A_ : Optional[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def _a ( self : List[Any] ,_a : Dict ,_a : List[str] ):
'''simple docstring'''
return calculate_bleu(_A ,_A )
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : int=None):
Path(args.output_dir).mkdir(exist_ok=__snake_case)
check_output_dir(__snake_case , expected_items=3)
if model is None:
if "summarization" in args.task:
A_ : List[str] = SummarizationModule(__snake_case)
else:
A_ : str = TranslationModule(__snake_case)
A_ : Union[str, Any] = Path(args.data_dir).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir).startswith("""/tmp""")
or str(args.output_dir).startswith("""/var""")
):
A_ : int = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[Any] = os.environ.get("""WANDB_PROJECT""" , __snake_case)
A_ : List[str] = WandbLogger(name=model.output_dir.name , project=__snake_case)
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}')
if args.early_stopping_patience >= 0:
A_ : Optional[Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience)
else:
A_ : Tuple = False
A_ : Optional[Any] = args.val_metric == """loss"""
A_ : Optional[int] = generic_train(
__snake_case , __snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __snake_case) , early_stopping_callback=__snake_case , logger=__snake_case , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""")
if not args.do_predict:
return model
A_ : Optional[Any] = """"""
A_ : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""") , recursive=__snake_case))
if checkpoints:
A_ : Optional[int] = checkpoints[-1]
A_ : Tuple = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams)
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
__magic_name__ = pl.Trainer.add_argparse_args(parser)
__magic_name__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__magic_name__ = parser.parse_args()
main(args)
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
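# Import structure for the NLLB tokenizers; slow and fast variants are only exposed when sentencepiece / tokenizers are installed.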
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
a_ = BarthezTokenizer
a_ = BarthezTokenizerFast
a_ = True
a_ = True
def _a ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
A_ : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=snake_case__ )
A_ : Any = tokenizer
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = """<pad>"""
A_ : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) ,snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) ,snake_case__ )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-1] ,"""<mask>""" )
self.assertEqual(len(snake_case__ ) ,101122 )
def _a ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def _a ( self : Tuple ):
'''simple docstring'''
A_ : int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
A_ : Dict = [0, 57, 3018, 70307, 91, 2]
A_ : Tuple = self.tokenizer(
snake_case__ ,max_length=len(snake_case__ ) ,padding=snake_case__ ,truncation=snake_case__ ,return_tensors="""pt""" )
self.assertIsInstance(snake_case__ ,snake_case__ )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
A_ : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case__ ,snake_case__ )
def _a ( self : List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A_ : Any = self.get_tokenizer()
A_ : Dict = self.get_rust_tokenizer()
A_ : List[str] = """I was born in 92000, and this is falsé."""
A_ : Any = tokenizer.tokenize(snake_case__ )
A_ : str = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
A_ : int = tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
A_ : int = rust_tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
A_ : List[Any] = self.get_rust_tokenizer()
A_ : Optional[Any] = tokenizer.encode(snake_case__ )
A_ : Any = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Any = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# moussaKam/mbarthez is a french model. So we also use french texts.
A_ : Tuple = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ ,model_name="""moussaKam/mbarthez""" ,revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" ,sequences=snake_case__ ,)
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
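# Tests for the Kandinsky 2.2 ControlNet pipeline: a fast check with tiny dummy components and a slow GPU integration test.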
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
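# Differential test: replay the same sequence of operations on HashMap and on a built-in dict, then compare the outcomes.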
def _get(k):
    return getitem, k
def _set(k , v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj , fun , *args):
    try:
        return fun(obj , *args), None
    except Exception as e:
        return None, e
__magic_name__ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
__magic_name__ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
__magic_name__ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
__magic_name__ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
__magic_name__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__magic_name__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items"""),
pytest.param(_overwrite_items , id="""overwrite items"""),
pytest.param(_delete_items , id="""delete items"""),
pytest.param(_access_absent_items , id="""access absent items"""),
pytest.param(_add_with_resize_up , id="""add with resize up"""),
pytest.param(_add_with_resize_down , id="""add with resize down"""),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my , fun , *args)
        py_res, py_exc = _run_operation(py , fun , *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_matching_python_dict():
    def is_public(name : str) -> bool:
        return not name.startswith("""_""")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
assert dict_public_names > hash_public_names
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
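# DeBERTa-v2 model configuration plus its ONNX export configuration.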
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
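# Ohm's law solver: exactly one of voltage, current or resistance is passed as 0 and is solved for from the other two.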
def ohms_law(voltage : float , current : float , resistance : float) -> dict[str, float]:
if (voltage, current, resistance).count(0) != 1:
raise ValueError("""One and only one argument must be 0""")
if resistance < 0:
raise ValueError("""Resistance cannot be negative""")
if voltage == 0:
return {"voltage": float(current * resistance)}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
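# Search Google for the command-line query and open the first few result links in the default web browser.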
if __name__ == "__main__":
print('Googling.....')
__magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__magic_name__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__magic_name__ = BeautifulSoup(res.text, 'html.parser')
__magic_name__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__magic_name__ = 'CompVis/stable-diffusion-v1-1'
__magic_name__ = 'CompVis/stable-diffusion-v1-2'
__magic_name__ = 'CompVis/stable-diffusion-v1-3'
__magic_name__ = 'CompVis/stable-diffusion-v1-4'
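# Convenience pipeline that loads four Stable Diffusion v1.x checkpoints and runs the same prompt through each for comparison.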
class __lowerCAmelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[int] ,_a : Optional[Any] ,_a : int ,_a : List[Any] ,_a : List[Any] ,_a : Tuple ,_a : Any ,_a : Optional[Any] ,_a : int = True ,):
'''simple docstring'''
        super().__init__()
A_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : str = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : str = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : str = StableDiffusionPipeline(
vae=_SCREAMING_SNAKE_CASE ,text_encoder=_SCREAMING_SNAKE_CASE ,tokenizer=_SCREAMING_SNAKE_CASE ,unet=_SCREAMING_SNAKE_CASE ,scheduler=_SCREAMING_SNAKE_CASE ,safety_checker=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,requires_safety_checker=_SCREAMING_SNAKE_CASE ,)
self.register_modules(pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return {k: getattr(self ,_SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith("""_""" )}
def _a ( self : int ,_a : Dict = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
def _a ( self : Any ):
'''simple docstring'''
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( self : int ,_a : Union[str, Any] ,_a : Any = 512 ,_a : List[Any] = 512 ,_a : int = 50 ,_a : Any = 7.5 ,_a : Any = None ,_a : Optional[Any] = 1 ,_a : List[Any] = 0.0 ,_a : Union[str, Any] = None ,_a : Tuple = None ,_a : int = "pil" ,_a : Optional[Any] = True ,_a : Optional[int] = None ,_a : Any = 1 ,**_a : int ,):
'''simple docstring'''
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE ,height=_SCREAMING_SNAKE_CASE ,width=_SCREAMING_SNAKE_CASE ,num_inference_steps=_SCREAMING_SNAKE_CASE ,guidance_scale=_SCREAMING_SNAKE_CASE ,negative_prompt=_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,output_type=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,callback=_SCREAMING_SNAKE_CASE ,callback_steps=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
@torch.no_grad()
def _a ( self : int ,_a : Dict ,_a : Optional[int] = 512 ,_a : str = 512 ,_a : Optional[int] = 50 ,_a : Optional[int] = 7.5 ,_a : Any = None ,_a : Tuple = 1 ,_a : Tuple = 0.0 ,_a : Union[str, Any] = None ,_a : Union[str, Any] = None ,_a : Union[str, Any] = "pil" ,_a : Optional[Any] = True ,_a : Optional[Any] = None ,_a : int = 1 ,**_a : int ,):
'''simple docstring'''
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE ,height=_SCREAMING_SNAKE_CASE ,width=_SCREAMING_SNAKE_CASE ,num_inference_steps=_SCREAMING_SNAKE_CASE ,guidance_scale=_SCREAMING_SNAKE_CASE ,negative_prompt=_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,output_type=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,callback=_SCREAMING_SNAKE_CASE ,callback_steps=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
@torch.no_grad()
def _a ( self : int ,_a : Optional[int] ,_a : List[Any] = 512 ,_a : Dict = 512 ,_a : str = 50 ,_a : Optional[Any] = 7.5 ,_a : Optional[int] = None ,_a : List[Any] = 1 ,_a : Optional[Any] = 0.0 ,_a : int = None ,_a : List[str] = None ,_a : int = "pil" ,_a : List[Any] = True ,_a : List[str] = None ,_a : List[str] = 1 ,**_a : List[str] ,):
'''simple docstring'''
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE ,height=_SCREAMING_SNAKE_CASE ,width=_SCREAMING_SNAKE_CASE ,num_inference_steps=_SCREAMING_SNAKE_CASE ,guidance_scale=_SCREAMING_SNAKE_CASE ,negative_prompt=_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,output_type=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,callback=_SCREAMING_SNAKE_CASE ,callback_steps=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
@torch.no_grad()
def _a ( self : List[str] ,_a : str ,_a : Union[str, Any] = 512 ,_a : Optional[Any] = 512 ,_a : List[Any] = 50 ,_a : Optional[int] = 7.5 ,_a : Optional[int] = None ,_a : Dict = 1 ,_a : int = 0.0 ,_a : int = None ,_a : Optional[Any] = None ,_a : Optional[Any] = "pil" ,_a : Optional[int] = True ,_a : Optional[int] = None ,_a : Optional[Any] = 1 ,**_a : Optional[int] ,):
'''simple docstring'''
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE ,height=_SCREAMING_SNAKE_CASE ,width=_SCREAMING_SNAKE_CASE ,num_inference_steps=_SCREAMING_SNAKE_CASE ,guidance_scale=_SCREAMING_SNAKE_CASE ,negative_prompt=_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,output_type=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,callback=_SCREAMING_SNAKE_CASE ,callback_steps=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
@torch.no_grad()
def _a ( self : List[str] ,_a : Optional[int] ,_a : Dict = 512 ,_a : Tuple = 512 ,_a : Any = 50 ,_a : int = 7.5 ,_a : Optional[Any] = None ,_a : Optional[int] = 1 ,_a : str = 0.0 ,_a : Optional[Any] = None ,_a : Optional[Any] = None ,_a : List[str] = "pil" ,_a : int = True ,_a : List[Any] = None ,_a : Any = 1 ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : List[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(_SCREAMING_SNAKE_CASE )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
# Get first result from Stable Diffusion Checkpoint v1.1
A_ : Optional[Any] = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE ,height=_SCREAMING_SNAKE_CASE ,width=_SCREAMING_SNAKE_CASE ,num_inference_steps=_SCREAMING_SNAKE_CASE ,guidance_scale=_SCREAMING_SNAKE_CASE ,negative_prompt=_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,output_type=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,callback=_SCREAMING_SNAKE_CASE ,callback_steps=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
# Get first result from Stable Diffusion Checkpoint v1.2
A_ : Optional[Any] = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE ,height=_SCREAMING_SNAKE_CASE ,width=_SCREAMING_SNAKE_CASE ,num_inference_steps=_SCREAMING_SNAKE_CASE ,guidance_scale=_SCREAMING_SNAKE_CASE ,negative_prompt=_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,output_type=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,callback=_SCREAMING_SNAKE_CASE ,callback_steps=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
# Get first result from Stable Diffusion Checkpoint v1.3
A_ : Any = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE ,height=_SCREAMING_SNAKE_CASE ,width=_SCREAMING_SNAKE_CASE ,num_inference_steps=_SCREAMING_SNAKE_CASE ,guidance_scale=_SCREAMING_SNAKE_CASE ,negative_prompt=_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,output_type=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,callback=_SCREAMING_SNAKE_CASE ,callback_steps=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
# Get first result from Stable Diffusion Checkpoint v1.4
A_ : int = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE ,height=_SCREAMING_SNAKE_CASE ,width=_SCREAMING_SNAKE_CASE ,num_inference_steps=_SCREAMING_SNAKE_CASE ,guidance_scale=_SCREAMING_SNAKE_CASE ,negative_prompt=_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,output_type=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,callback=_SCREAMING_SNAKE_CASE ,callback_steps=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 708 |
'''simple docstring'''
from ... import PretrainedConfig
__magic_name__ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
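# Configuration class for NEZHA, a BERT-style Chinese language model that uses relative position encodings.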
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
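# Lazy import structure for GPT-NeoX: configuration, fast tokenizer and PyTorch modeling classes.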
__magic_name__ = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709 |
'''simple docstring'''
from __future__ import annotations
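# Iterative depth-first search over a graph given as an adjacency-list dictionary.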
def depth_first_search(graph : dict , start : str) -> set:
    explored , stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
__magic_name__ = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 0 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
__magic_name__ = '''3'''
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 710 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
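# Build a MobileViTConfig matching the requested checkpoint variant (image classification or DeepLabV3 segmentation).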
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
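# Translate original MobileViT (CVNets) parameter names into the Transformers naming scheme.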
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
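# Convert an original MobileViT checkpoint, verify its logits on a test image, then save and optionally push to the hub.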
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 27 | 0 |
'''simple docstring'''
from PIL import Image
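# Binarize a grayscale image by thresholding every pixel against the global mean intensity.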
def mean_threshold(image : Image.Image) -> Image.Image:
    width , height = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            mean += pixels[i, j]
    mean //= width * height
    for i in range(width):
        for j in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
__magic_name__ = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 711 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
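# Image processor implementing the standard CLIP preprocessing: resize, center crop, RGB conversion, rescale and normalize.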
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
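# --- Hedged sketch (added): a minimal NumPy restatement of the preprocessing order used
# above (resize -> center crop -> rescale -> normalize -> channels first), on a synthetic
# image, so each step is easy to inspect. The constants mirror the defaults above (224
# crop, 1/255 rescale, the usual OpenAI CLIP mean/std); `_preprocess_demo` is an
# illustrative helper, not part of this class, and assumes Pillow and NumPy are installed.
import numpy as np
from PIL import Image as _Image

def _preprocess_demo(img: "_Image.Image") -> np.ndarray:
    img = img.resize((256, 256))                              # simplified shortest-edge resize
    arr = np.asarray(img, dtype=np.float32)
    top, left = (arr.shape[0] - 224) // 2, (arr.shape[1] - 224) // 2
    arr = arr[top : top + 224, left : left + 224]             # center crop to 224x224
    arr = arr / 255.0                                         # rescale
    mean = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
    std = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)
    arr = (arr - mean) / std                                  # normalize
    return arr.transpose(2, 0, 1)                             # HWC -> CHW

# _preprocess_demo(_Image.fromarray(np.zeros((300, 300, 3), dtype=np.uint8))).shape == (3, 224, 224)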
| 27 | 0 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] ):
'''simple docstring'''
A_ : str = {}
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Any ,_a : Optional[int]=1 ):
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A_ : str = [[w, v]]
if not self.graph.get(lowerCamelCase_ ):
A_ : Tuple = []
def _a ( self : Optional[Any] ):
'''simple docstring'''
return list(self.graph )
def _a ( self : Optional[int] ,_a : Any ,_a : str ):
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase_ )
def _a ( self : List[str] ,_a : Union[str, Any]=-2 ,_a : str=-1 ):
'''simple docstring'''
if s == d:
return []
A_ : List[str] = []
A_ : List[str] = []
if s == -2:
A_ : Tuple = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
A_ : Any = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase_ ) != 0:
A_ : int = stack[len(lowerCamelCase_ ) - 1]
else:
A_ : Any = ss
            # check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return visited
def _a ( self : Dict ,_a : Optional[int]=-1 ):
'''simple docstring'''
if c == -1:
A_ : str = floor(random() * 10000 ) + 10
for i in range(lowerCamelCase_ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
A_ : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase_ ,lowerCamelCase_ ,1 )
def _a ( self : List[Any] ,_a : Any=-2 ):
'''simple docstring'''
A_ : str = deque()
A_ : List[str] = []
if s == -2:
A_ : int = list(self.graph )[0]
d.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
while d:
A_ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _a ( self : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _a ( self : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
return len(self.graph[u] )
def _a ( self : str ,_a : Optional[Any]=-2 ):
'''simple docstring'''
A_ : List[Any] = []
A_ : List[str] = []
if s == -2:
A_ : Union[str, Any] = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
A_ : Union[str, Any] = s
A_ : List[str] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : int = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCamelCase_ ) != 0:
A_ : List[Any] = stack[len(lowerCamelCase_ ) - 1]
else:
A_ : int = ss
            # check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return sorted_nodes
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : List[Any] = []
A_ : Any = []
A_ : List[Any] = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
A_ : Any = -2
A_ : int = []
A_ : Union[str, Any] = s
A_ : Tuple = False
A_ : Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Any = len(lowerCamelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : int = True
if len(lowerCamelCase_ ) != 0:
A_ : Any = stack[len(lowerCamelCase_ ) - 1]
else:
A_ : List[str] = False
indirect_parents.append(lowerCamelCase_ )
A_ : List[Any] = s
A_ : List[Any] = ss
            # check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return list(lowerCamelCase_ )
def _a ( self : Dict ):
'''simple docstring'''
A_ : Tuple = []
A_ : int = []
A_ : Any = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
A_ : Union[str, Any] = -2
A_ : List[Any] = []
A_ : Tuple = s
A_ : Dict = False
A_ : Optional[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : str = len(lowerCamelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : Dict = True
if len(lowerCamelCase_ ) != 0:
A_ : List[str] = stack[len(lowerCamelCase_ ) - 1]
else:
A_ : List[Any] = False
indirect_parents.append(lowerCamelCase_ )
A_ : Optional[int] = s
A_ : Optional[int] = ss
# check if se have reached the starting point
if len(lowerCamelCase_ ) == 0:
return False
def _a ( self : Any ,_a : str=-2 ,_a : int=-1 ):
'''simple docstring'''
A_ : Optional[Any] = time()
self.dfs(lowerCamelCase_ ,lowerCamelCase_ )
A_ : List[str] = time()
return end - begin
def _a ( self : int ,_a : Tuple=-2 ):
'''simple docstring'''
A_ : Optional[int] = time()
self.bfs(lowerCamelCase_ )
A_ : List[str] = time()
return end - begin
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = {}
def _a ( self : List[str] ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any]=1 ):
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A_ : Any = [[w, v]]
# add the other way
if self.graph.get(lowerCamelCase_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
A_ : Any = [[w, u]]
def _a ( self : Tuple ,_a : Tuple ,_a : Any ):
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase_ )
# the other way round
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCamelCase_ )
def _a ( self : int ,_a : str=-2 ,_a : List[str]=-1 ):
'''simple docstring'''
if s == d:
return []
A_ : List[str] = []
A_ : Any = []
if s == -2:
A_ : List[Any] = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
A_ : List[str] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A_ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase_ ) != 0:
A_ : Any = stack[len(lowerCamelCase_ ) - 1]
else:
A_ : List[str] = ss
            # check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return visited
def _a ( self : Any ,_a : List[str]=-1 ):
'''simple docstring'''
if c == -1:
A_ : Any = floor(random() * 10000 ) + 10
for i in range(lowerCamelCase_ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
A_ : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase_ ,lowerCamelCase_ ,1 )
def _a ( self : List[str] ,_a : List[Any]=-2 ):
'''simple docstring'''
A_ : List[Any] = deque()
A_ : Tuple = []
if s == -2:
A_ : Any = list(self.graph )[0]
d.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
while d:
A_ : List[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _a ( self : Tuple ,_a : str ):
'''simple docstring'''
return len(self.graph[u] )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[str] = []
A_ : Any = []
A_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
A_ : Optional[int] = -2
A_ : Tuple = []
A_ : Any = s
A_ : Optional[Any] = False
A_ : Optional[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Optional[Any] = len(lowerCamelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : Optional[int] = True
if len(lowerCamelCase_ ) != 0:
A_ : int = stack[len(lowerCamelCase_ ) - 1]
else:
A_ : Union[str, Any] = False
indirect_parents.append(lowerCamelCase_ )
A_ : List[str] = s
A_ : str = ss
            # check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return list(lowerCamelCase_ )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = []
A_ : int = []
A_ : Any = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
A_ : Dict = -2
A_ : int = []
A_ : int = s
A_ : Union[str, Any] = False
A_ : Tuple = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Optional[Any] = len(lowerCamelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : Any = True
if len(lowerCamelCase_ ) != 0:
A_ : str = stack[len(lowerCamelCase_ ) - 1]
else:
A_ : Optional[Any] = False
indirect_parents.append(lowerCamelCase_ )
A_ : Any = s
A_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCamelCase_ ) == 0:
return False
def _a ( self : List[Any] ):
'''simple docstring'''
return list(self.graph )
def _a ( self : str ,_a : List[str]=-2 ,_a : str=-1 ):
'''simple docstring'''
A_ : str = time()
self.dfs(lowerCamelCase_ ,lowerCamelCase_ )
A_ : Tuple = time()
return end - begin
def _a ( self : Tuple ,_a : Dict=-2 ):
'''simple docstring'''
A_ : Optional[Any] = time()
self.bfs(lowerCamelCase_ )
A_ : Dict = time()
return end - begin
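# --- Hedged sketch (added): the adjacency-list traversal idea used by both classes above,
# restated with readable names for quick experiments. Edge weights are dropped because the
# traversals above never use them; `plain_dfs` / `plain_bfs` are illustrative names only,
# and `deque` is already imported at the top of this file.
def plain_dfs(adj: dict, start) -> list:
    """Iterative depth-first traversal over a dict of node -> list of neighbours."""
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            stack.extend(reversed(adj.get(node, [])))
    return visited

def plain_bfs(adj: dict, start) -> list:
    """Breadth-first traversal over the same structure."""
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for neighbour in adj.get(node, []):
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
    return visited

# plain_dfs({1: [2, 3], 2: [4], 3: [], 4: []}, 1) -> [1, 2, 4, 3]
# plain_bfs({1: [2, 3], 2: [4], 3: [], 4: []}, 1) -> [1, 2, 3, 4]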
| 712 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 27 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int):
return sum(i for i in range(1 , number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
__magic_name__ = int(input('Enter number: ').strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 713 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : complex , lowerCamelCase : str = "x" , lowerCamelCase : float = 10**-10 , lowerCamelCase : int = 1 , ):
A_ : int = symbols(lowerCamelCase)
A_ : List[Any] = lambdify(lowerCamelCase , lowerCamelCase)
A_ : List[str] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase))
A_ : str = starting_point
while True:
if diff_function(lowerCamelCase) != 0:
A_ : int = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
lowerCamelCase)
else:
raise ZeroDivisionError("""Could not find root""") from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess) < precision:
return next_guess
A_ : Union[str, Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spm_char.model'}
__magic_name__ = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
__magic_name__ = {
'microsoft/speecht5_asr': 1_024,
'microsoft/speecht5_tts': 1_024,
'microsoft/speecht5_vc': 1_024,
}
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] ,_a : Tuple ,_a : List[str]="<s>" ,_a : Tuple="</s>" ,_a : Tuple="<unk>" ,_a : Dict="<pad>" ,_a : Optional[Dict[str, Any]] = None ,**_a : Dict ,):
'''simple docstring'''
A_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Dict = vocab_file
A_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Dict = None
return state
def __setstate__( self : List[str] ,_a : List[str] ):
'''simple docstring'''
A_ : List[str] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : str = {}
A_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : List[Any] ,_a : Dict ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : Dict ,_a : Any ):
'''simple docstring'''
A_ : int = self.sp_model.IdToPiece(_a )
return token
def _a ( self : List[str] ,_a : Any ):
'''simple docstring'''
A_ : List[Any] = []
A_ : str = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_a ) + token
A_ : List[Any] = []
else:
current_sub_tokens.append(_a )
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : str ,_a : List[Any] ,_a : List[str]=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
A_ : Optional[Any] = [1]
if token_ids_a is None:
return ([0] * len(_a )) + suffix_ones
return ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def _a ( self : List[Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : Tuple = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : Dict = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 714 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : Dict ):
'''simple docstring'''
super().__init__()
A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a )
A_ : int = list(model.children() )[:-2]
A_ : int = nn.Sequential(*_a )
A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = [json.loads(_a ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
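# --- Hedged sketch (added): the padded text/mask layout the collate function above is
# meant to produce, shown on a tiny synthetic batch so the shapes are easy to check.
# `_pad_demo` is an illustrative helper, not part of this module; it reuses the torch
# import at the top of this file.
def _pad_demo():
    sentences = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
    lengths = [len(s) for s in sentences]
    text = torch.zeros(len(sentences), max(lengths), dtype=torch.long)
    mask = torch.zeros(len(sentences), max(lengths), dtype=torch.long)
    for i, (sent, length) in enumerate(zip(sentences, lengths)):
        text[i, :length] = sent   # copy the token ids
        mask[i, :length] = 1      # mark real (non-padding) positions
    return text, mask  # text -> [[5, 6, 7], [8, 9, 0]], mask -> [[1, 1, 1], [1, 1, 0]]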
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
| 27 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __lowerCAmelCase :
'''simple docstring'''
def _a ( self : Tuple ,_a : List[str] ,_a : Any ,_a : Optional[int] ):
'''simple docstring'''
return None
class __lowerCAmelCase :
'''simple docstring'''
def _a ( self : Tuple ,_a : Dict ,_a : Union[str, Any] ,_a : Dict ,_a : Tuple ):
'''simple docstring'''
return None
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ ,"""tf""" ,12 ,**lowercase_ )
@require_torch
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ ,"""pt""" ,12 ,**lowercase_ )
@require_torch
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
from transformers import BertModel
A_ : Tuple = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(lowercase_ ) )
vocab_file.flush()
A_ : Union[str, Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
A_ : str = BertModel(BertConfig(vocab_size=len(lowercase_ ) ) )
model.save_pretrained(lowercase_ )
self._test_export(lowercase_ ,"""pt""" ,12 ,lowercase_ )
@require_tf
@slow
def _a ( self : Dict ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A_ : List[Any] = self._test_export(lowercase_ ,"""tf""" ,12 ,**lowercase_ )
A_ : Union[str, Any] = quantize(Path(lowercase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def _a ( self : Dict ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A_ : Optional[int] = self._test_export(lowercase_ ,"""pt""" ,12 ,**lowercase_ )
A_ : Dict = quantize(lowercase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def _a ( self : Optional[int] ,_a : Optional[Any] ,_a : Tuple ,_a : Optional[int] ,_a : str=None ,**_a : List[str] ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
A_ : str = Path(lowercase_ ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,**lowercase_ )
return path
except Exception as e:
self.fail(lowercase_ )
@require_torch
@require_tokenizers
@slow
def _a ( self : Dict ):
'''simple docstring'''
from transformers import BertModel
A_ : Optional[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
A_ : List[str] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase_ ,lowercase_ ,"""pt""" )
@require_tf
@require_tokenizers
@slow
def _a ( self : str ):
'''simple docstring'''
from transformers import TFBertModel
A_ : List[Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
A_ : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase_ ,lowercase_ ,"""tf""" )
def _a ( self : Any ,_a : List[str] ,_a : str ,_a : List[str] ):
'''simple docstring'''
A_ : Optional[int] = FeatureExtractionPipeline(lowercase_ ,lowercase_ )
A_ : Tuple = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
A_ , A_ , A_ , A_ : Union[str, Any] = infer_shapes(lowercase_ ,lowercase_ )
# Assert all variables are present
self.assertEqual(len(lowercase_ ) ,len(lowercase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,lowercase_ )
self.assertSequenceEqual(variable_names[3:] ,lowercase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = ["""input_ids""", """attention_mask""", """token_type_ids"""]
A_ : int = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
A_ , A_ : Optional[Any] = ensure_valid_input(FuncContiguousArgs() ,lowercase_ ,lowercase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowercase_ ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(lowercase_ ) ,set(lowercase_ ) )
        # Parameters should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowercase_ ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with other args (for instance parameter "past" in GPT2)
A_ , A_ : str = ensure_valid_input(FuncNonContiguousArgs() ,lowercase_ ,lowercase_ )
        # Should have exactly one arg (all args before the one not provided, "some_other_args")
self.assertEqual(len(lowercase_ ) ,1 )
self.assertEqual(len(lowercase_ ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] ,"""input_ids""" )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( lowerCamelCase : int):
if num <= 0:
A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(lowerCamelCase)
A_ : str = [True] * (num + 1)
A_ : Tuple = []
A_ : str = 2
A_ : Any = int(math.sqrt(lowerCamelCase))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCamelCase)
            # Set multiples of start to False
for i in range(start * start , num + 1 , lowerCamelCase):
if sieve[i] is True:
A_ : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
prime.append(lowerCamelCase)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
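# --- Hedged sketch (added): a cross-check of the sieve against naive trial division for
# small n, useful when modifying the loop bounds above. `_is_prime_naive` is an
# illustrative helper, not part of this module.
def _is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

# assert prime_sieve(100) == [n for n in range(2, 101) if _is_prime_naive(n)]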
| 27 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ = CTRLTokenizer
a_ = False
a_ = False
def _a ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ : str = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
A_ : Union[str, Any] = dict(zip(lowerCAmelCase_ ,range(len(lowerCAmelCase_ ) ) ) )
A_ : Any = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
A_ : str = {"""unk_token""": """<unk>"""}
A_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def _a ( self : int ,**_a : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCAmelCase_ )
def _a ( self : List[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = """adapt react readapt apt"""
A_ : Optional[int] = """adapt react readapt apt"""
return input_text, output_text
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
A_ : Tuple = """adapt react readapt apt"""
A_ : int = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
A_ : Any = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ ,lowerCAmelCase_ )
A_ : int = tokens + [tokenizer.unk_token]
A_ : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) ,lowerCAmelCase_ )
| 716 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for preprocessing the dataset.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.inta:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take up a lot of space). So we remove that
# left whitespace
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
A_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase)
A_ : Tuple = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
        # Set offset_mapping entries that are not part of the context to None, so it's easy to determine if a token
# position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def lowerCamelCase ( lowerCamelCase : List[str] = 8):
'''simple docstring'''
A_ : Tuple = ascii_letters + digits + punctuation
return "".join(secrets.choice(lowerCamelCase) for _ in range(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]):
'''simple docstring'''
i -= len(lowerCamelCase)
A_ : str = i // 3
A_ : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
A_ : List[Any] = (
chars_incl
+ random(lowerCamelCase , quotient + remainder)
+ random(lowerCamelCase , lowerCamelCase)
+ random(lowerCamelCase , lowerCamelCase)
)
A_ : str = list(lowerCamelCase)
shuffle(lowerCamelCase)
return "".join(lowerCamelCase)
# random is a generalised function for letters, characters and numbers
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]):
'''simple docstring'''
return "".join(secrets.choice(lowerCamelCase) for _ in range(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : List[Any]):
'''simple docstring'''
pass # Put your code here...
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : List[str]):
'''simple docstring'''
pass # Put your code here...
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[str]):
'''simple docstring'''
pass # Put your code here...
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : str = 8):
'''simple docstring'''
if len(lowerCamelCase) < min_length:
# Your Password must be at least 8 characters long
return False
A_ : str = any(char in ascii_uppercase for char in password)
A_ : Tuple = any(char in ascii_lowercase for char in password)
A_ : Dict = any(char in digits for char in password)
A_ : Any = any(char in punctuation for char in password)
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def lowerCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = int(input("""Please indicate the max length of your password: """).strip())
A_ : Tuple = input(
"""Please indicate the characters that must be in your password: """).strip()
print("""Password generated:""" , password_generator(lowerCamelCase))
print(
"""Alternative Password generated:""" , alternative_password_generator(lowerCamelCase , lowerCamelCase) , )
print("""[If you are thinking of using this passsword, You better save it.]""")
if __name__ == "__main__":
main()
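# --- Hedged sketch (added): the same character-class strength check as above, restated
# on fixed strings so the expected behaviour is easy to see. `_looks_strong` is an
# illustrative name, not part of this module; it reuses the string constants imported at
# the top of this file.
def _looks_strong(pwd: str, min_length: int = 8) -> bool:
    return (
        len(pwd) >= min_length
        and any(c in ascii_uppercase for c in pwd)
        and any(c in ascii_lowercase for c in pwd)
        and any(c in digits for c in pwd)
        and any(c in punctuation for c in pwd)
    )

# _looks_strong("Hj8!k2Lp") -> True; _looks_strong("alllowercase") -> False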
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
            logger.info("""vision_config is None. Initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
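# Note: the ONNX export config above exposes dynamic batch/sequence axes for the text
# inputs and batch/channel/height/width axes for the pixel values, builds its dummy
# inputs from both the tokenizer and the image processor, and pins the validation
# tolerance to 1e-4 with a minimum opset of 14.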
| 27 | 0 |
'''simple docstring'''
from math import isqrt
def lowerCamelCase ( lowerCamelCase : int):
return all(number % divisor != 0 for divisor in range(2 , isqrt(__a) + 1))
def lowerCamelCase ( lowerCamelCase : int = 10**6):
A_ : Optional[Any] = 0
A_ : Optional[Any] = 1
A_ : Any = 7
while prime_candidate < max_prime:
primes_count += is_prime(__a)
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
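# Note: the loop above walks the differences of consecutive cubes,
# (k + 1)**3 - k**3 = 3*k*k + 3*k + 1 (7, 19, 37, 61, 91, ...), since each step adds
# 6 * cube_index; the function therefore appears to count how many of those values
# below the limit are prime.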
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
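    # Note: the fused feature built above stacks four views of the log-mel spectrogram
    # along a new leading axis: a bilinear downsample of the whole clip plus one chunk
    # whose start index is drawn at random from the front, middle and back thirds of
    # the valid positions, giving a (4, chunk_frames, 64) array for long inputs.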
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
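    # Note: for clips longer than max_length, "rand_trunc" randomly crops the raw
    # waveform while "fusion" builds the 4-channel stack above; for shorter clips,
    # "repeat" tiles the waveform up to max_length, "repeatpad" tiles whole copies and
    # then zero-pads, and any other padding value simply zero-pads. `longer` flags
    # inputs that had to be truncated.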
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
| 27 | 0 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __lowerCAmelCase ( enum.Enum ):
'''simple docstring'''
a_ = 0
a_ = 1
a_ = 2
@add_end_docstrings(_snake_case )
class __lowerCAmelCase ( _snake_case ):
'''simple docstring'''
a_ = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self : int ,*_a : Optional[int] ,**_a : int ):
'''simple docstring'''
super().__init__(*snake_case_ ,**snake_case_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
A_ : int = None
if self.model.config.prefix is not None:
A_ : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
A_ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
A_ : str = self._sanitize_parameters(prefix=snake_case_ ,**self._forward_params )
A_ : str = {**self._preprocess_params, **preprocess_params}
A_ : Optional[int] = {**self._forward_params, **forward_params}
def _a ( self : str ,_a : Union[str, Any]=None ,_a : Optional[Any]=None ,_a : str=None ,_a : List[str]=None ,_a : Optional[Any]=None ,_a : Tuple=None ,_a : Optional[int]=None ,_a : Any=None ,**_a : str ,):
'''simple docstring'''
A_ : Optional[int] = {}
if prefix is not None:
A_ : List[str] = prefix
if prefix:
A_ : Dict = self.tokenizer(
snake_case_ ,padding=snake_case_ ,add_special_tokens=snake_case_ ,return_tensors=self.framework )
A_ : Dict = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
""" [None, 'hole']""" )
A_ : Any = handle_long_generation
preprocess_params.update(snake_case_ )
A_ : int = generate_kwargs
A_ : Union[str, Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
A_ : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
A_ : str = ReturnType.TENSORS
if return_type is not None:
A_ : List[str] = return_type
if clean_up_tokenization_spaces is not None:
A_ : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
A_ : List[str] = self.tokenizer.encode(snake_case_ ,add_special_tokens=snake_case_ )
if len(snake_case_ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
A_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _a ( self : List[Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*snake_case_ ,**snake_case_ )
def __call__( self : Tuple ,_a : Union[str, Any] ,**_a : List[Any] ):
'''simple docstring'''
return super().__call__(snake_case_ ,**snake_case_ )
def _a ( self : Tuple ,_a : Optional[Any] ,_a : int="" ,_a : List[Any]=None ,**_a : List[Any] ):
'''simple docstring'''
A_ : Dict = self.tokenizer(
prefix + prompt_text ,padding=snake_case_ ,add_special_tokens=snake_case_ ,return_tensors=self.framework )
A_ : Any = prompt_text
if handle_long_generation == "hole":
A_ : Dict = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
A_ : Union[str, Any] = generate_kwargs["max_new_tokens"]
else:
A_ : int = generate_kwargs.get("""max_length""" ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
A_ : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        """We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"""
                        """ model's max length""" )
A_ : Optional[Any] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
A_ : int = inputs["attention_mask"][:, -keep_length:]
return inputs
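    # Note: the "hole" strategy above drops tokens from the left of the prompt so that
    # prompt length plus the requested new tokens still fits the tokenizer's
    # model_max_length; e.g. with a 1024-token window and max_new_tokens=20, only the
    # last 1004 prompt tokens (and the matching attention-mask slice) are kept.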
def _a ( self : List[str] ,_a : int ,**_a : str ):
'''simple docstring'''
A_ : str = model_inputs["input_ids"]
A_ : Optional[Any] = model_inputs.get("""attention_mask""" ,snake_case_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
A_ : List[Any] = None
A_ : str = None
A_ : Tuple = 1
else:
A_ : Optional[Any] = input_ids.shape[0]
A_ : Optional[int] = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
A_ : Union[str, Any] = generate_kwargs.pop("""prefix_length""" ,0 )
if prefix_length > 0:
A_ : Any = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
A_ : Optional[int] = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
A_ : int = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
A_ : Optional[int] = self.model.generate(input_ids=snake_case_ ,attention_mask=snake_case_ ,**snake_case_ )
A_ : int = generated_sequence.shape[0]
if self.framework == "pt":
A_ : List[Any] = generated_sequence.reshape(snake_case_ ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
A_ : Union[str, Any] = tf.reshape(snake_case_ ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
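    # Note: generate() returns a flat (batch * num_return_sequences, seq_len) tensor;
    # the reshape above restores an explicit (batch, num_return_sequences, seq_len)
    # layout so that postprocessing can group the candidate sequences per prompt.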
def _a ( self : Optional[int] ,_a : List[Any] ,_a : Any=ReturnType.FULL_TEXT ,_a : Union[str, Any]=True ):
'''simple docstring'''
A_ : str = model_outputs["generated_sequence"][0]
A_ : int = model_outputs["input_ids"]
A_ : Optional[int] = model_outputs["prompt_text"]
A_ : List[Any] = generated_sequence.numpy().tolist()
A_ : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
A_ : int = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
A_ : int = self.tokenizer.decode(
snake_case_ ,skip_special_tokens=snake_case_ ,clean_up_tokenization_spaces=snake_case_ ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
A_ : Dict = 0
else:
A_ : Any = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=snake_case_ ,clean_up_tokenization_spaces=snake_case_ ,) )
if return_type == ReturnType.FULL_TEXT:
A_ : str = prompt_text + text[prompt_length:]
else:
A_ : Tuple = text[prompt_length:]
A_ : List[Any] = {"generated_text": all_text}
records.append(snake_case_ )
return records
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) : str = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 0 |
'''simple docstring'''
import math
def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
if (
not isinstance(lowerCamelCase , (int, float))
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""")
return apparent_power * power_factor
def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
if (
not isinstance(lowerCamelCase , (int, float))
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""")
return apparent_power * math.sqrt(1 - power_factor**2)
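# Worked example for the two formulas above: with apparent_power = 100 (VA) and
# power_factor = 0.8, real power is 100 * 0.8 = 80 W and reactive power is
# 100 * sqrt(1 - 0.8**2) = 60 var, which satisfies the power triangle
# 100**2 = 80**2 + 60**2.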
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
    return baseaa.aaaencode(lowerCamelCase.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 0 |
'''simple docstring'''
__magic_name__ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase ( lowerCamelCase : Dict):
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowerCAmelCase , _lowerCAmelCase):
A_ : Dict = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(_lowerCAmelCase)
A_ : int = """""".join(bin(_lowerCAmelCase)[2:].zfill(8) for byte in data)
A_ : int = len(_lowerCAmelCase) % 6 != 0
if padding_needed:
# The padding that will be added later
A_ : Tuple = b"""=""" * ((6 - len(_lowerCAmelCase) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowerCAmelCase) % 6)
else:
A_ : Any = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2)]
for index in range(0 , len(_lowerCAmelCase) , 6)).encode()
+ padding
)
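# Example of what the encoder above produces for b"Hello": 5 bytes = 40 bits, padded
# with two zero bits to 42 bits (7 six-bit groups) plus one "=" character, giving
# b"SGVsbG8=" - the same result as the standard library's base64.b64encode.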
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowerCAmelCase , _lowerCAmelCase) and not isinstance(_lowerCAmelCase , _lowerCAmelCase):
A_ : Union[str, Any] = (
"""argument should be a bytes-like object or ASCII string, """
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(_lowerCAmelCase)
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowerCAmelCase , _lowerCAmelCase):
try:
A_ : Optional[int] = encoded_data.decode("""utf-8""")
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""")
A_ : List[Any] = encoded_data.count("""=""")
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowerCAmelCase) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
A_ : Tuple = encoded_data[:-padding]
A_ : List[str] = """""".join(
bin(B64_CHARSET.index(_lowerCAmelCase))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
else:
A_ : Optional[int] = """""".join(
bin(B64_CHARSET.index(_lowerCAmelCase))[2:].zfill(6) for char in encoded_data)
A_ : Dict = [
int(binary_stream[index : index + 8] , 2)
for index in range(0 , len(_lowerCAmelCase) , 8)
]
return bytes(_lowerCAmelCase)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
# word like '180' or '身高' or '神'
for char in word:
A_ : Optional[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase)
if chinese_word:
word_set.add(lowerCamelCase)
A_ : Any = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
if not chinese_word_set:
return bert_tokens
A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
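# Note: the loop above greedily matches the longest LTP-segmented Chinese word that
# starts at each position and rewrites its non-initial BERT sub-tokens with a "##"
# prefix, so that whole-word masking can later treat the word as a single unit.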
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = pipeline(task="""text-generation""" ,model="""sshleifer/tiny-ctrl""" ,framework="""pt""" )
# Using `do_sample=False` to force deterministic output
A_ : Optional[int] = text_generator("""This is a test""" ,do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase ,[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] ,)
A_ : Dict = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__lowerCamelCase ,[
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] ,)
A_ : Optional[int] = text_generator("""This is a test""" ,do_sample=__lowerCamelCase ,num_return_sequences=2 ,return_tensors=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase ,[
{"""generated_token_ids""": ANY(__lowerCamelCase )},
{"""generated_token_ids""": ANY(__lowerCamelCase )},
] ,)
A_ : Union[str, Any] = text_generator.model.config.eos_token_id
A_ : str = '''<pad>'''
A_ : Optional[int] = text_generator(
["""This is a test""", """This is a second test"""] ,do_sample=__lowerCamelCase ,num_return_sequences=2 ,batch_size=2 ,return_tensors=__lowerCamelCase ,)
self.assertEqual(
__lowerCamelCase ,[
[
{"""generated_token_ids""": ANY(__lowerCamelCase )},
{"""generated_token_ids""": ANY(__lowerCamelCase )},
],
[
{"""generated_token_ids""": ANY(__lowerCamelCase )},
{"""generated_token_ids""": ANY(__lowerCamelCase )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = pipeline(task="""text-generation""" ,model="""sshleifer/tiny-ctrl""" ,framework="""tf""" )
# Using `do_sample=False` to force deterministic output
A_ : str = text_generator("""This is a test""" ,do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase ,[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] ,)
A_ : List[str] = text_generator(["""This is a test""", """This is a second test"""] ,do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase ,[
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] ,)
def _a ( self : List[Any] ,_a : List[str] ,_a : Tuple ,_a : str ):
'''simple docstring'''
A_ : int = TextGenerationPipeline(model=__lowerCamelCase ,tokenizer=__lowerCamelCase )
return text_generator, ["This is a test", "Another test"]
def _a ( self : Dict ):
'''simple docstring'''
A_ : Tuple = '''Hello I believe in'''
A_ : str = pipeline("""text-generation""" ,model="""hf-internal-testing/tiny-random-gpt2""" )
A_ : Dict = text_generator(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase ,[{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] ,)
A_ : Dict = text_generator(__lowerCamelCase ,stop_sequence=""" fe""" )
self.assertEqual(__lowerCamelCase ,[{"""generated_text""": """Hello I believe in fe"""}] )
def _a ( self : Optional[Any] ,_a : Any ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = text_generator.model
A_ : Union[str, Any] = text_generator.tokenizer
A_ : Optional[int] = text_generator("""This is a test""" )
self.assertEqual(__lowerCamelCase ,[{"""generated_text""": ANY(__lowerCamelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
A_ : List[Any] = text_generator("""This is a test""" ,return_full_text=__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,[{"""generated_text""": ANY(__lowerCamelCase )}] )
self.assertNotIn("""This is a test""" ,outputs[0]["""generated_text"""] )
A_ : str = pipeline(task="""text-generation""" ,model=__lowerCamelCase ,tokenizer=__lowerCamelCase ,return_full_text=__lowerCamelCase )
A_ : Optional[Any] = text_generator("""This is a test""" )
self.assertEqual(__lowerCamelCase ,[{"""generated_text""": ANY(__lowerCamelCase )}] )
self.assertNotIn("""This is a test""" ,outputs[0]["""generated_text"""] )
A_ : List[Any] = text_generator("""This is a test""" ,return_full_text=__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,[{"""generated_text""": ANY(__lowerCamelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
A_ : List[str] = text_generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase ,[
[{"""generated_text""": ANY(__lowerCamelCase )}, {"""generated_text""": ANY(__lowerCamelCase )}],
[{"""generated_text""": ANY(__lowerCamelCase )}, {"""generated_text""": ANY(__lowerCamelCase )}],
] ,)
if text_generator.tokenizer.pad_token is not None:
A_ : List[str] = text_generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase ,[
[{"""generated_text""": ANY(__lowerCamelCase )}, {"""generated_text""": ANY(__lowerCamelCase )}],
[{"""generated_text""": ANY(__lowerCamelCase )}, {"""generated_text""": ANY(__lowerCamelCase )}],
] ,)
with self.assertRaises(__lowerCamelCase ):
A_ : List[str] = text_generator("""test""" ,return_full_text=__lowerCamelCase ,return_text=__lowerCamelCase )
with self.assertRaises(__lowerCamelCase ):
A_ : List[Any] = text_generator("""test""" ,return_full_text=__lowerCamelCase ,return_tensors=__lowerCamelCase )
with self.assertRaises(__lowerCamelCase ):
A_ : Union[str, Any] = text_generator("""test""" ,return_text=__lowerCamelCase ,return_tensors=__lowerCamelCase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
A_ : int = text_generator("""""" )
self.assertEqual(__lowerCamelCase ,[{"""generated_text""": ANY(__lowerCamelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
A_ : List[Any] = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
A_ : Tuple = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 ,max_new_tokens=20 )
A_ : List[str] = text_generator("""This is a test""" * 500 ,handle_long_generation="""hole""" ,max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__lowerCamelCase ):
text_generator(
"""This is a test""" * 500 ,handle_long_generation="""hole""" ,max_new_tokens=tokenizer.model_max_length + 10 ,)
@require_torch
@require_accelerate
@require_torch_gpu
def _a ( self : List[str] ):
'''simple docstring'''
import torch
# Classic `model_kwargs`
A_ : List[str] = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" ,model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} ,)
self.assertEqual(pipe.model.device ,torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.bfloataa )
A_ : Any = pipe("""This is a test""" )
self.assertEqual(
__lowerCamelCase ,[
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] ,)
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
A_ : Union[str, Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" ,device_map="""auto""" ,torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device ,torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.bfloataa )
A_ : List[Any] = pipe("""This is a test""" )
self.assertEqual(
__lowerCamelCase ,[
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] ,)
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
A_ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" ,device_map="""auto""" )
self.assertEqual(pipe.model.device ,torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.floataa )
A_ : Tuple = pipe("""This is a test""" )
self.assertEqual(
__lowerCamelCase ,[
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] ,)
@require_torch
@require_torch_gpu
def _a ( self : Optional[Any] ):
'''simple docstring'''
import torch
A_ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" ,device=0 ,torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _a ( self : Optional[Any] ):
'''simple docstring'''
import torch
A_ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" ,device_map="""auto""" ,torch_dtype=torch.floataa )
pipe("""This is a test""" ,do_sample=__lowerCamelCase ,top_p=0.5 )
def _a ( self : Any ):
'''simple docstring'''
A_ : str = '''Hello world'''
A_ : Optional[int] = pipeline("""text-generation""" ,model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
A_ : Tuple = logging.get_logger("""transformers.generation.tf_utils""" )
else:
A_ : str = logging.get_logger("""transformers.generation.utils""" )
        A_ : Tuple = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__lowerCamelCase ) as cl:
A_ : Union[str, Any] = text_generator(__lowerCamelCase ,max_length=10 ,max_new_tokens=1 )
self.assertIn(__lowerCamelCase ,cl.out )
# The user only sets one -> no warning
with CaptureLogger(__lowerCamelCase ) as cl:
A_ : Dict = text_generator(__lowerCamelCase ,max_new_tokens=1 )
self.assertNotIn(__lowerCamelCase ,cl.out )
with CaptureLogger(__lowerCamelCase ) as cl:
A_ : Tuple = text_generator(__lowerCamelCase ,max_length=10 )
self.assertNotIn(__lowerCamelCase ,cl.out )
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
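# A minimal usage sketch for the processor above, assuming the public transformers
# `ViltProcessor` API that this (obfuscated) class mirrors; the checkpoint name and
# image URL below are illustrative assumptions, not taken from this file.
from PIL import Image
import requests
from transformers import ViltProcessor
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
encoding = processor(image, "How many cats are there?", return_tensors="pt")
# `encoding` carries the tokenizer outputs plus `pixel_values`/`pixel_mask` from the image processor.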
| 27 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
a_ = """bit"""
a_ = ["""preactivation""", """bottleneck"""]
a_ = ["""SAME""", """VALID"""]
def __init__( self : List[str] ,_a : int=3 ,_a : List[str]=64 ,_a : Optional[int]=[256, 512, 1024, 2048] ,_a : Optional[int]=[3, 4, 6, 3] ,_a : int="preactivation" ,_a : Union[str, Any]="relu" ,_a : List[Any]=None ,_a : Tuple=32 ,_a : Optional[int]=0.0 ,_a : Union[str, Any]=False ,_a : Dict=32 ,_a : Union[str, Any]=1 ,_a : List[Any]=None ,_a : Any=None ,**_a : str ,):
'''simple docstring'''
super().__init__(**_lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A_ : List[str] = global_padding.upper()
else:
raise ValueError(f'Padding strategy {global_padding} not supported' )
A_ : int = num_channels
A_ : Any = embedding_size
A_ : int = hidden_sizes
A_ : int = depths
A_ : List[str] = layer_type
A_ : Optional[Any] = hidden_act
A_ : Dict = global_padding
A_ : Any = num_groups
A_ : Dict = drop_path_rate
A_ : List[Any] = embedding_dynamic_padding
A_ : Any = output_stride
A_ : List[Any] = width_factor
A_ : Union[str, Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 ,len(_lowercase ) + 1 )]
A_ , A_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase ,out_indices=_lowercase ,stage_names=self.stage_names )
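# Hedged sketch: instantiating the BiT configuration above through the public
# transformers `BitConfig` class (the values below are illustrative only).
from transformers import BitConfig
config = BitConfig(layer_type="preactivation", global_padding="same", out_features=["stage2", "stage4"])
print(config.global_padding)  # upper-cased to "SAME" by the constructor
print(config.out_features)    # aligned against the stage names built in __init__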
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__magic_name__ = TypeVar('T')
__magic_name__ = TypeVar('U')
class __lowerCAmelCase ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict ,_a : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Any = key
A_ : List[Any] = val
A_ : DoubleLinkedListNode[T, U] | None = None
A_ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : str ):
'''simple docstring'''
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __lowerCAmelCase ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Tuple ):
'''simple docstring'''
A_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase_ ,lowercase_ )
A_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase_ ,lowercase_ )
A_ : Union[str, Any] = self.rear, self.head
def __repr__( self : List[str] ):
'''simple docstring'''
A_ : Dict = ["DoubleLinkedList"]
A_ : Dict = self.head
while node.next is not None:
rep.append(str(lowercase_ ) )
A_ : List[str] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase_ )
def _a ( self : int ,_a : List[Any] ):
'''simple docstring'''
A_ : List[str] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
A_ : Tuple = node
A_ : str = previous
A_ : Optional[Any] = node
A_ : Any = self.rear
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
A_ : Union[str, Any] = node.next
A_ : Optional[int] = node.prev
A_ : str = None
A_ : int = None
return node
class __lowerCAmelCase ( Generic[T, U] ):
'''simple docstring'''
a_ = {}
def __init__( self : Optional[int] ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : DoubleLinkedList[T, U] = DoubleLinkedList()
A_ : List[str] = capacity
A_ : Any = 0
A_ : Dict = 0
A_ : Union[str, Any] = 0
A_ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : str ):
'''simple docstring'''
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : str ,_a : int ):
'''simple docstring'''
return key in self.cache
def _a ( self : Any ,_a : int ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
A_ : DoubleLinkedListNode[T, U] = self.cache[key]
A_ : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase_ )
return node.val
self.miss += 1
return None
def _a ( self : Tuple ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
A_ : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase_ ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
A_ : Dict = DoubleLinkedListNode(lowercase_ ,lowercase_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
A_ : List[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
A_ : Optional[Any] = value
self.list.add(lowercase_ )
@classmethod
def _a ( cls : Optional[Any] ,_a : str = 128 ):
'''simple docstring'''
def cache_decorator_inner(_a : str ) -> Callable[..., U]:
def cache_decorator_wrapper(*_a : Any ) -> U:
if func not in cls.decorator_function_to_instance_map:
A_ : List[str] = LRUCache(lowercase_ )
A_ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
A_ : Any = func(*lowercase_ )
cls.decorator_function_to_instance_map[func].put(args[0] ,lowercase_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase_ ,"""cache_info""" ,lowercase_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
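# Hedged usage sketch of the cache above, assuming the original (pre-obfuscation)
# names `LRUCache` and `LRUCache.decorator`; the decorator keys the cache on the
# first positional argument, so it suits single-argument functions such as `fib`.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)
fib(100)
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=...)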
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
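# Hedged example of the SQuAD-style metrics above, using their original
# (pre-obfuscation) names normalize_answer / f1_score / calculate_exact_match;
# every helper in this listing is shown under the placeholder name `lowerCamelCase`.
print(normalize_answer("The Cat, sat!"))  # "cat sat"
print(round(f1_score("a cat sat on the mat", "the cat sat"), 2))  # 0.67 (token-overlap F1)
print(calculate_exact_match(["cat sat"], ["the Cat sat."]))  # {"em": 1.0}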
| 27 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int]):
# Initialise PyTorch model
A_ : Optional[Any] = BertConfig.from_json_file(lowerCamelCase)
print(F'Building PyTorch model from configuration: {config}')
A_ : List[Any] = BertForPreTraining(lowerCamelCase)
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}')
torch.save(model.state_dict() , lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
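# Example invocation (paths are placeholders and the script name is assumed):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin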
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
import numpy
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any ,_a : numpy.ndarray ,_a : numpy.ndarray ):
'''simple docstring'''
A_ : List[Any] = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
A_ : Any = numpy.random.rand(
self.input_array.shape[1] ,4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
A_ : int = numpy.random.rand(
4 ,3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
A_ : Optional[int] = numpy.random.rand(3 ,1 )
# Real output values provided.
A_ : Tuple = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
A_ : Optional[Any] = numpy.zeros(output_array.shape )
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[Any] = sigmoid(
numpy.dot(self.input_array ,self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
A_ : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer ,self.first_hidden_layer_and_second_hidden_layer_weights ,) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
A_ : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer ,self.second_hidden_layer_and_output_layer_weights ,) )
return self.layer_between_second_hidden_layer_and_output
def _a ( self : Dict ):
'''simple docstring'''
A_ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T ,2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) ,)
A_ : Any = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T ,numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) ,self.second_hidden_layer_and_output_layer_weights.T ,)
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) ,)
A_ : str = numpy.dot(
self.input_array.T ,numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) ,self.second_hidden_layer_and_output_layer_weights.T ,)
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) ,self.first_hidden_layer_and_second_hidden_layer_weights.T ,)
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) ,)
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _a ( self : Any ,_a : numpy.ndarray ,_a : int ,_a : bool ):
'''simple docstring'''
for iteration in range(1 ,iterations + 1 ):
A_ : Optional[int] = self.feedforward()
self.back_propagation()
if give_loss:
A_ : str = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'Iteration {iteration} Loss: {loss}' )
def _a ( self : int ,_a : numpy.ndarray ):
'''simple docstring'''
A_ : Dict = input_arr
A_ : Optional[int] = sigmoid(
numpy.dot(self.array ,self.input_layer_and_first_hidden_layer_weights ) )
A_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer ,self.first_hidden_layer_and_second_hidden_layer_weights ,) )
A_ : str = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer ,self.second_hidden_layer_and_output_layer_weights ,) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCamelCase ( lowerCamelCase : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def lowerCamelCase ( lowerCamelCase : numpy.ndarray):
return (value) * (1 - (value))
def lowerCamelCase ( ):
A_ : Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
A_ : List[str] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
A_ : Any = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
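# Hedged sketch: the same network trained on a two-input XOR truth table, assuming
# the class above under its original name TwoHiddenLayerNeuralNetwork (shown here as
# `__lowerCAmelCase`) and float64 inputs in place of the `floataa` placeholder.
def xor_example() -> int:
    data = numpy.array(([0, 0], [0, 1], [1, 0], [1, 1]), dtype=numpy.float64)
    truth = numpy.array(([0], [1], [1], [0]), dtype=numpy.float64)
    network = TwoHiddenLayerNeuralNetwork(input_array=data, output_array=truth)
    network.train(output=truth, iterations=1_000, give_loss=False)
    return network.predict(numpy.array(([1, 0]), dtype=numpy.float64))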
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 0 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__magic_name__ = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__magic_name__ = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__magic_name__ = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__magic_name__ = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__magic_name__ = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : List[Any]):
for tf_name, hf_name in patterns:
A_ : Optional[Any] = k.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
return k
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : dict):
A_ : Any = BigBirdPegasusConfig(**_SCREAMING_SNAKE_CASE)
A_ : str = BigBirdPegasusForConditionalGeneration(_SCREAMING_SNAKE_CASE)
A_ : Any = torch_model.state_dict()
A_ : Union[str, Any] = {}
# separating decoder weights
A_ : List[str] = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""")}
A_ : int = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""")}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion"""):
A_ : List[str] = [k.endswith(_SCREAMING_SNAKE_CASE) for ending in KEYS_TO_IGNORE]
if any(_SCREAMING_SNAKE_CASE):
continue
A_ : Optional[Any] = DECODER_PATTERNS
A_ : Optional[int] = rename_state_dict_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})')
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""]):
A_ : Tuple = v.T
A_ : str = torch.from_numpy(_SCREAMING_SNAKE_CASE)
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion"""):
A_ : List[str] = [k.endswith(_SCREAMING_SNAKE_CASE) for ending in KEYS_TO_IGNORE]
if any(_SCREAMING_SNAKE_CASE):
continue
A_ : Union[str, Any] = REMAINING_PATTERNS
A_ : str = rename_state_dict_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})')
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""]):
A_ : int = v.T
A_ : Union[str, Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE)
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
A_ : Union[str, Any] = mapping["""model.embed_positions.weight"""]
A_ : Optional[Any] = mapping.pop("""model.embed_positions.weight""")
A_ , A_ : int = torch_model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE)
A_ : List[Any] = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def lowerCamelCase ( lowerCamelCase : Any):
A_ : Optional[int] = tf.train.list_variables(_SCREAMING_SNAKE_CASE)
A_ : Any = {}
A_ : List[Any] = ["""global_step"""]
for name, shape in tqdm(_SCREAMING_SNAKE_CASE , desc="""converting tf checkpoint to dict"""):
A_ : List[Any] = any(pat in name for pat in ignore_name)
if skip_key:
continue
A_ : Optional[Any] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
A_ : int = array
return tf_weights
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : dict):
A_ : Any = get_tf_weights_as_numpy(_SCREAMING_SNAKE_CASE)
A_ : Dict = convert_bigbird_pegasus(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
torch_model.save_pretrained(_SCREAMING_SNAKE_CASE)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
__magic_name__ = parser.parse_args()
__magic_name__ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
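# Example invocation (placeholder paths; the script name is assumed):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus-ckpt/model.ckpt-0 \
#       --save_dir ./bigbird-pegasus-pt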
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 27 | 0 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any ,_a : Dict ,_a : str ):
'''simple docstring'''
A_ : List[Any] = question_encoder
A_ : Dict = generator
A_ : Any = self.question_encoder
def _a ( self : List[Any] ,_a : int ):
'''simple docstring'''
if os.path.isfile(_a ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_a ,exist_ok=_a )
A_ : Any = os.path.join(_a ,"""question_encoder_tokenizer""" )
A_ : List[Any] = os.path.join(_a ,"""generator_tokenizer""" )
self.question_encoder.save_pretrained(_a )
self.generator.save_pretrained(_a )
@classmethod
def _a ( cls : int ,_a : List[str] ,**_a : List[Any] ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
A_ : Optional[int] = kwargs.pop("""config""" ,_a )
if config is None:
A_ : str = RagConfig.from_pretrained(_a )
A_ : Optional[int] = AutoTokenizer.from_pretrained(
_a ,config=config.question_encoder ,subfolder="""question_encoder_tokenizer""" )
A_ : Tuple = AutoTokenizer.from_pretrained(
_a ,config=config.generator ,subfolder="""generator_tokenizer""" )
return cls(question_encoder=_a ,generator=_a )
def __call__( self : Optional[int] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
return self.current_tokenizer(*_a ,**_a )
def _a ( self : Optional[int] ,*_a : List[Any] ,**_a : Tuple ):
'''simple docstring'''
return self.generator.batch_decode(*_a ,**_a )
def _a ( self : Dict ,*_a : Dict ,**_a : List[Any] ):
'''simple docstring'''
return self.generator.decode(*_a ,**_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : Optional[Any] = self.question_encoder
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.generator
def _a ( self : int ,_a : Any ,_a : List[str] = None ,_a : Dict = None ,_a : Dict = None ,_a : List[Any] = "longest" ,_a : List[str] = None ,_a : str = True ,**_a : Union[str, Any] ,):
'''simple docstring'''
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" ,_a ,)
if max_length is None:
A_ : Union[str, Any] = self.current_tokenizer.model_max_length
A_ : Union[str, Any] = self(
_a ,add_special_tokens=_a ,return_tensors=_a ,max_length=_a ,padding=_a ,truncation=_a ,**_a ,)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
A_ : List[str] = self.current_tokenizer.model_max_length
A_ : List[Any] = self(
text_target=_a ,add_special_tokens=_a ,return_tensors=_a ,padding=_a ,max_length=_a ,truncation=_a ,**_a ,)
A_ : List[Any] = labels["""input_ids"""]
return model_inputs
| 707 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__magic_name__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__magic_name__ = BeautifulSoup(res.text, 'html.parser')
__magic_name__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Dict = len(lowerCAmelCase__)
A_ : List[Any] = len(lowerCAmelCase__)
A_ : str = [[False for _ in range(m + 1)] for _ in range(n + 1)]
A_ : Optional[Any] = True
for i in range(lowerCAmelCase__):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A_ : Union[str, Any] = True
if a[i].islower():
A_ : str = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
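# Hedged example of the DP check above, assuming its conventional name `abbr(a, b)`:
# it asks whether `a` can be turned into `b` by uppercasing some of its lowercase
# letters and deleting the remaining lowercase ones.
print(abbr("daBcd", "ABC"))  # True ("daBcd" -> "dABCd" -> "ABC")
print(abbr("dBcd", "ABC"))   # False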
| 708 |
'''simple docstring'''
from ... import PretrainedConfig
__magic_name__ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 27 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__magic_name__ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class __lowerCAmelCase ( a__ ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = "left"
def __init__( self : Tuple ,_a : List[Any] ,_a : Tuple=False ,_a : int=True ,_a : str=False ,_a : Dict="<s>" ,_a : Tuple="</s>" ,_a : str="<unk>" ,_a : Tuple="<sep>" ,_a : Optional[int]="<pad>" ,_a : Union[str, Any]="<cls>" ,_a : List[str]="<mask>" ,_a : Tuple=["<eop>", "<eod>"] ,_a : int = None ,**_a : Dict ,):
'''simple docstring'''
A_ : List[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
A_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ ,remove_space=lowerCamelCase_ ,keep_accents=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,additional_special_tokens=lowerCamelCase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase_ ,)
A_ : List[str] = 3
A_ : Any = do_lower_case
A_ : List[Any] = remove_space
A_ : List[Any] = keep_accents
A_ : Dict = vocab_file
A_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return len(self.sp_model )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
'''simple docstring'''
A_ : Dict = self.__dict__.copy()
A_ : Tuple = None
return state
def __setstate__( self : Any ,_a : int ):
'''simple docstring'''
A_ : List[Any] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Dict ,_a : Dict ):
'''simple docstring'''
if self.remove_space:
A_ : Union[str, Any] = """ """.join(inputs.strip().split() )
else:
A_ : str = inputs
A_ : List[str] = outputs.replace("""``""" ,"""\"""" ).replace("""\'\'""" ,"""\"""" )
if not self.keep_accents:
A_ : str = unicodedata.normalize("""NFKD""" ,lowerCamelCase_ )
A_ : List[Any] = """""".join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] )
if self.do_lower_case:
A_ : List[str] = outputs.lower()
return outputs
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.preprocess_text(lowerCamelCase_ )
A_ : List[Any] = self.sp_model.encode(lowerCamelCase_ ,out_type=lowerCamelCase_ )
A_ : str = []
for piece in pieces:
if len(lowerCamelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ ,"""""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A_ : str = cur_pieces[1:]
else:
A_ : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase_ )
else:
new_pieces.append(lowerCamelCase_ )
return new_pieces
def _a ( self : List[Any] ,_a : Optional[Any] ):
'''simple docstring'''
return self.sp_model.PieceToId(lowerCamelCase_ )
def _a ( self : List[Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCamelCase_ )
def _a ( self : Optional[int] ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(lowerCamelCase_ ).replace(lowerCamelCase_ ,""" """ ).strip()
return out_string
def _a ( self : Union[str, Any] ,_a : Union[str, Any] ,_a : Dict = False ,_a : int = None ,_a : Optional[Any] = True ,**_a : Dict ,):
'''simple docstring'''
A_ : List[str] = kwargs.pop("""use_source_tokenizer""" ,lowerCamelCase_ )
A_ : List[Any] = self.convert_ids_to_tokens(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : List[str] = []
A_ : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
A_ : Tuple = []
sub_texts.append(lowerCamelCase_ )
else:
current_sub_text.append(lowerCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A_ : Tuple = """""".join(lowerCamelCase_ )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : List[Any] = self.clean_up_tokenization(lowerCamelCase_ )
return clean_text
else:
return text
def _a ( self : Union[str, Any] ,_a : List[str] ,_a : Optional[Any] = None ):
'''simple docstring'''
A_ : Optional[Any] = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self : int ,_a : Union[str, Any] ,_a : Optional[int] = None ,_a : List[str] = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1, 1]
return ([0] * len(lowerCamelCase_ )) + [1, 1]
def _a ( self : List[str] ,_a : int ,_a : int = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a ( self : int ,_a : Optional[Any] ,_a : Optional[int] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ ,"""wb""" ) as fi:
A_ : Any = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
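# Minimal usage sketch, assuming the public transformers `XLNetTokenizer` that this
# (obfuscated) class mirrors; the checkpoint is one of the two listed in the vocab map above.
from transformers import XLNetTokenizer
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
enc = tokenizer("Hello world")
print(enc["input_ids"])  # ends with <sep> and <cls>, as built in build_inputs_with_special_tokens
print(tokenizer.decode(enc["input_ids"]))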
| 709 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
A_ , A_ : List[Any] = set(lowerCamelCase), [start]
while stack:
A_ : Optional[Any] = stack.pop()
explored.add(lowerCamelCase)
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v]):
if adj not in explored:
stack.append(lowerCamelCase)
return explored
__magic_name__ = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : list[int]):
A_ : Dict = len(lowerCamelCase) // 2
# choose the middle 3 elements
A_ : Tuple = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m]) == 2:
m -= 1
return peak(lst[m:])
# decreasing
else:
if len(lst[:m]) == 2:
m += 1
return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
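# Hedged example of the divide-and-conquer peak finder above (conventional name `peak`):
print(peak([1, 2, 3, 4, 5, 4, 3, 2, 1]))  # 5
print(peak([1, 10, 9, 8, 7, 6]))          # 10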
| 710 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    A_ : Dict = Image.open(requests.get(A_ , stream=True).raw)
    return A_
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
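# Illustrative command-line invocation (hedged sketch): the script filename and
# the local paths below are assumptions; only the flag names come from the
# argparse definitions above.
#
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small \
#       --push_to_hub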
| 27 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : int = None , ):
A_ : Tuple = {}
if train_file is not None:
A_ : Optional[Any] = [train_file]
if eval_file is not None:
A_ : Tuple = [eval_file]
if test_file is not None:
A_ : Union[str, Any] = [test_file]
A_ : Dict = datasets.load_dataset("""csv""" , data_files=__lowerCAmelCase)
A_ : List[str] = list(ds[list(files.keys())[0]].features.keys())
A_ : str = features_name.pop(__lowerCAmelCase)
A_ : Optional[int] = list(set(ds[list(files.keys())[0]][label_name]))
A_ : List[Any] = {label: i for i, label in enumerate(__lowerCAmelCase)}
A_ : Tuple = tokenizer.model_input_names
A_ : Dict = {}
if len(__lowerCAmelCase) == 1:
for k in files.keys():
A_ : Tuple = ds[k].map(
lambda lowerCamelCase: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""") , batched=__lowerCAmelCase , )
elif len(__lowerCAmelCase) == 2:
for k in files.keys():
A_ : int = ds[k].map(
lambda lowerCamelCase: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , ) , batched=__lowerCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A_ : Optional[int] = {k: v for k, v in ex.items() if k in input_names}
A_ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A_ : List[Any] = {k: v for k, v in ex.items() if k in input_names}
A_ : Any = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A_ : Optional[int] = {k: v for k, v in ex.items() if k in input_names}
A_ : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
A_ : List[Any] = (
tf.data.Dataset.from_generator(
__lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A_ : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
A_ : Optional[Any] = (
tf.data.Dataset.from_generator(
__lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A_ : Dict = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
A_ : List[Any] = (
tf.data.Dataset.from_generator(
__lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A_ : List[str] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = field(metadata={"""help""": """Which column contains the label"""} )
a_ = field(default=__UpperCAmelCase , metadata={"""help""": """The path of the training file"""} )
a_ = field(default=__UpperCAmelCase , metadata={"""help""": """The path of the development file"""} )
a_ = field(default=__UpperCAmelCase , metadata={"""help""": """The path of the test file"""} )
a_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ = field(
default=__UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ = field(
default=__UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ = field(
default=__UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ = field(default=__UpperCAmelCase , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=__UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def lowerCamelCase ( ):
A_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
A_ , A_ , A_ : str = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '
F'16-bits training: {training_args.fpaa}')
logger.info(F'Training/evaluation parameters {training_args}')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ : Optional[int] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A_ , A_ , A_ , A_ : Tuple = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
A_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowerCAmelCase) , labelaid=__lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
A_ : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(lowerCamelCase : Union[str, Any]) -> Dict:
A_ : List[Any] = np.argmax(p.predictions , axis=1)
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A_ : List[str] = TFTrainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
A_ : Any = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
A_ : Optional[Any] = trainer.evaluate()
A_ : Optional[int] = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(__lowerCAmelCase , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F' {key} = {value}')
writer.write(F'{key} = {value}\n')
results.update(__lowerCAmelCase)
return results
if __name__ == "__main__":
main()
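# Illustrative invocation (hedged sketch): the script filename, CSV paths and
# model name below are assumptions; the argument names mirror the dataclass
# fields and TFTrainingArguments attributes used above (label_column_id,
# train_file, dev_file, max_seq_length, model_name_or_path, output_dir,
# do_train, do_eval).
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file ./train.csv \
#       --dev_file ./dev.csv \
#       --label_column_id 0 \
#       --max_seq_length 128 \
#       --output_dir ./model_output \
#       --do_train \
#       --do_eval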
| 711 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
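# Minimal usage sketch (hedged): the class above mirrors the CLIP-style image
# processors shipped with transformers (note the OPENAI_CLIP_MEAN/STD defaults).
# The readable class name, the example file and the printed shape below are
# assumptions for illustration, not part of this file.
#
#   from PIL import Image
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor(size={"shortest_edge": 224})
#   batch = image_processor(images=Image.open("example.jpg"), return_tensors="pt")
#   print(batch["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224]) after the center crop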
| 27 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
a_ = """Speech2TextFeatureExtractor"""
a_ = """Speech2TextTokenizer"""
def __init__( self : str ,_a : Optional[int] ,_a : Dict ):
'''simple docstring'''
super().__init__(_UpperCAmelCase ,_UpperCAmelCase )
A_ : str = self.feature_extractor
A_ : Dict = False
def __call__( self : Any ,*_a : str ,**_a : str ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase ,**_UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
A_ : Tuple = kwargs.pop("""raw_speech""" )
else:
A_ : Union[str, Any] = kwargs.pop("""audio""" ,_UpperCAmelCase )
A_ : Optional[int] = kwargs.pop("""sampling_rate""" ,_UpperCAmelCase )
A_ : Tuple = kwargs.pop("""text""" ,_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
A_ : str = args[0]
A_ : List[str] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
A_ : List[str] = self.feature_extractor(_UpperCAmelCase ,*_UpperCAmelCase ,sampling_rate=_UpperCAmelCase ,**_UpperCAmelCase )
if text is not None:
A_ : str = self.tokenizer(_UpperCAmelCase ,**_UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A_ : Optional[int] = encodings['''input_ids''']
return inputs
def _a ( self : Dict ,*_a : Dict ,**_a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase ,**_UpperCAmelCase )
def _a ( self : Dict ,*_a : str ,**_a : Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase ,**_UpperCAmelCase )
@contextmanager
def _a ( self : Any ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
A_ : List[str] = True
A_ : Union[str, Any] = self.tokenizer
yield
A_ : Optional[int] = self.feature_extractor
A_ : Optional[Any] = False
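# Minimal usage sketch (hedged): this processor pairs a Speech2TextFeatureExtractor
# with a Speech2TextTokenizer, as declared above. The checkpoint name and the
# dummy waveform below are assumptions for illustration only.
#
#   import numpy as np
#   from transformers import Speech2TextProcessor
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   dummy_audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
#   inputs = processor(audio=dummy_audio, sampling_rate=16000, return_tensors="pt")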
| 712 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
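# Migration sketch (hedged): since this deprecated wrapper only forwards to
# OwlViTImageProcessor, new code can instantiate the image processor directly;
# the checkpoint name below is an assumption for illustration.
#
#   from transformers import OwlViTImageProcessor
#   image_processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")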
| 27 | 0 |