| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reconstructed target: the dump lost the variable name; "3" silences TensorFlow's C++ logging
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 75 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
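# Example invocation (a sketch; all three paths are hypothetical placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin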
| 202 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        # Donut expects the question wrapped in its DocVQA task prompt
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 75 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
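# Construction sketch (the sub-model choices are assumptions; RAG typically pairs a DPR
# question encoder with a BART generator):
#   from transformers import AutoConfig
#   question_encoder = AutoConfig.for_model("dpr").to_dict()
#   generator = AutoConfig.for_model("bart").to_dict()
#   config = RagConfig(question_encoder=question_encoder, generator=generator, n_docs=5)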
| 75 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    '''simple docstring'''

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        """Runs the doctests of every matching file in `directory`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self) -> None:
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self) -> None:
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self) -> None:
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doctests(self) -> None:
        """simple docstring"""
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self) -> None:
        """simple docstring"""
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
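# Minimal sketch of the doctest-to-unittest bridge used by analyze_directory
# (the target module here is an arbitrary example):
#   suite = doctest.DocTestSuite(transformers.tokenization_utils)
#   result = unittest.TextTestRunner().run(suite)
#   assert len(result.failures) == 0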
| 76 |
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes found by value
    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
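# Expected output of the demo above: pushing 5..1 builds the list 1 2 3 4 5, and
# swap_nodes(1, 4) exchanges the two data fields in place:
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5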
| 142 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Softmax with a tiny additive epsilon so fully masked rows stay numerically stable."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            F'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding '''
            F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    # HDF5 object headers are limited in size, so long lists of attributes must be chunked
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            F'''bytes: {bad_attributes}''')

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
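# Usage sketch for shape_list (a hypothetical eager tensor; every dim is static here,
# so plain Python ints come back):
#   x = tf.zeros((2, 3, 128))
#   assert shape_list(x) == [2, 3, 128]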
| 271 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    '''simple docstring'''

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # total of each resource currently allocated across all processes
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        # remaining claim of each process = maximum claim - current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F'''Process {process_number + 1} is executing.''')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self):
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                F'''P{self.__allocated_resources_table.index(item) + 1}'''
                + ' '.join(F'''{it:>8}''' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                F'''P{self.__maximum_claim_table.index(item) + 1}'''
                + ' '.join(F'''{it:>8}''' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
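# Usage sketch with the module-level test tables defined above; the flag name is
# arbitrary (any keyword set to True triggers the pretty-printed tables):
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)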
| 271 | 1 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    '''simple docstring'''

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''')
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        # 256-bit random private key
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('''Invalid public key''')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('''Invalid public key''')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
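# Key-exchange sketch (both parties must use the same MODP group; the derived
# SHA-256 digests agree):
#   alice = DiffieHellman(group=14)
#   bob = DiffieHellman(group=14)
#   assert alice.generate_shared_key(bob.generate_public_key()) == \
#          bob.generate_shared_key(alice.generate_public_key())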
| 8 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''feat_extract.json''')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding='''max_length''', return_tensors='''np''').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='''np''').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''').select(range(num_samples))[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors='''pt''').input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_var_norm(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
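# Standalone usage sketch (synthetic audio; Whisper expects 16 kHz input and pads to
# 30 seconds, i.e. 80 mel bins x 3000 frames):
#   fe = WhisperFeatureExtractor()
#   audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
#   features = fe(audio, sampling_rate=16_000, return_tensors="np").input_features
#   print(features.shape)  # (1, 80, 3000)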
| 104 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"))
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10)
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"))
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"))
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0])
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0])
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
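# Minimal generation sketch outside the test harness (same checkpoint as the
# integration test above; a GPU is assumed for reasonable speed):
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#   output = pipe(generator=torch.Generator().manual_seed(42))
#   audio, image = output.audios[0], output.images[0]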
| 367 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = 'roformer'

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
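# Construction sketch (the arguments shown are simply the signature defaults above;
# any subset may be overridden):
#   config = RoFormerConfig(vocab_size=50_000, hidden_size=768, rotary_value=False)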
| 41 | 0 |
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    '''Sorts a list in place, using binary search to locate each insertion point.'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion index of `val` within collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right to make room, then insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
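# Worked example (easy to verify by hand):
#   binary_insertion_sort([5, 2, 4, 1, 3])  # -> [1, 2, 3, 4, 5]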
| 80 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """simple docstring"""

    # Field names reconstructed to match `datasets.DownloadConfig`; the original dump
    # collapsed them all into one placeholder name, so treat them as a best-effort mapping.
    cache_dir = None
    force_download = False
    resume_download = False
    local_files_only = False
    proxies = None
    user_agent = None
    extract_compressed_file = False
    force_extract = False
    delete_extracted = False
    use_etag = True
    num_proc = None
    max_retries = 1
    use_auth_token = None
    ignore_url_params = False
    storage_options = None
    download_desc = None

    def copy(self) -> "DownloadConfig":
        '''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
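# Copy-semantics sketch: copy() deep-copies every field, so mutating the clone leaves
# the original untouched:
#   cfg = DownloadConfig(max_retries=3)
#   clone = cfg.copy()
#   clone.max_retries = 5
#   assert cfg.max_retries == 3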
| 282 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # Re-run the model check with each supported position embedding type.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 282 | 1 |
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """Check whether consecutive differences in ``series`` are all equal.

    >>> is_arithmetic_series([2, 4, 6])
    True
    >>> is_arithmetic_series([3, 6, 12, 24])
    False
    """
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of ``series``.

    >>> arithmetic_mean([2, 4, 6])
    4.0
    """
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '''<unk>''',
            '''<cls>''',
            '''<sep>''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            inputs = tokenizer('''UNwant\u00E9d,running''')
            sentence_len = len(inputs['''input_ids''']) - 1
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len)

            inputs = tokenizer('''UNwant\u00E9d,running''', '''UNwant\u00E9d,running''')
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len + [1] * sentence_len)
| 75 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled `use_memorry_efficient_attention` key is kept for
        # backward compatibility with configs saved under the old name.
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention', use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f"""got {self.rope_scaling}"""
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 360 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """An interactive terminal menu driven by the arrow keys (or number keys)."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print the choice at `index`, highlighting it when it is selected."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the cursor up or down, redrawing the affected rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
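# Usage sketch (the choices are illustrative): run an interactive picker and
# get the selected index back.
#   menu = BulletMenu("Pick a backend:", ["pytorch", "tensorflow", "jax"])
#   selected = menu.run(default_choice=0)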
| 207 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 271 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self):
        return f"""{self.data}"""


class Stack(Generic[T]):
    """A LIFO stack backed by a singly linked list."""

    def __init__(self):
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        # The new node becomes the top and points at the previous top.
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
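# Minimal usage sketch:
#   stack: Stack[int] = Stack()
#   stack.push(1)
#   stack.push(2)
#   assert stack.pop() == 2 and stack.peek() == 1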
| 271 | 1 |
def or_gate(input_a: int, input_b: int) -> int:
    """Return 1 if at least one input is 1, else 0 (logical OR)."""
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 306 |
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from the standard UK coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    # Classic coin-change DP: for each coin, accumulate the ways to reach i.
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 306 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Loads a Hugging Face dataset from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
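# Hypothetical usage sketch (requires an active SparkSession named `spark`):
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()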
| 38 |
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from a subreddit, keeping only the `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f'''Invalid search term: {invalid_search_terms}'''
        raise ValueError(msg)
    response = requests.get(
        f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''',
        headers={"""User-agent""": """A random string"""},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 41 | 0 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set a (possibly quantized) tensor of `module` on `device`, quantizing on the fly when needed."""
    # `tensor_name` can be nested, e.g. "transformer.h.0.attn.weight".
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'''{module} has no attribute {split}.''')
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''')

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.")
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """Recursively replace `nn.Linear`/`Conv1D` modules with bitsandbytes quantized linears."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public wrapper around `_replace_with_bnb_linear` that warns when nothing was replaced."""
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """Return module names (tied weights and the output head) to keep in full precision."""
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
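# Hypothetical usage sketch (`bnb_config` stands in for a BitsAndBytesConfig):
#   keep_fp32 = get_keys_to_not_convert(model)
#   model = replace_with_bnb_linear(
#       model, modules_to_not_convert=keep_fp32, quantization_config=bnb_config
#   )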
| 71 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_lowercase: Optional[Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_lowercase: Union[str, Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_lowercase: Union[str, Any] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_lowercase: List[Any] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_lowercase: List[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Estimate pass@k for each problem by executing every candidate program."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k of each problem and return the estimates in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        # Calculates 1 - comb(n - c, k) / comb(n, k).
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 71 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the model's expected input range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`, mapping model outputs back to feature space."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps)}.''')

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info('Generated segment', i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.')

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 282 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    """Wraps multiple ControlNetModel instances; their residuals are summed in the forward pass."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f'''_{idx}'''

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f'''_{idx}'''

        logger.info(f'''{len(controlnets)} controlnets loaded from {pretrained_model_path}.''')

        if len(controlnets) == 0:
            raise ValueError(
                f'''No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.''')

        return cls(controlnets)
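# Minimal sketch: wrap two ControlNets so their residuals are summed at each
# UNet block (the nets and conditioning images are illustrative):
#   multi = MultiControlNetModel([controlnet_canny, controlnet_depth])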
| 282 | 1 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png"""
        )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"""
        )

        model_id = """xvjiarui/stable-diffusion-2-inpainting"""
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 136 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    """Undirected weighted graph with Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # Attach the smaller component under the larger one.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Placeholder kept from the original file; see the demo sketch below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
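# Illustrative demo (the graph and weights are made up for this sketch):
#   g = Graph(4)
#   g.add_edge(0, 1, 1); g.add_edge(1, 2, 2); g.add_edge(2, 3, 1); g.add_edge(3, 0, 3)
#   g.boruvka()  # prints the chosen edges and a total MST weight of 4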
| 136 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
_lowercase =LlamaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase )
_lowercase =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Any:
_lowercase =True
_lowercase =LlamaModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]:
_lowercase =LlamaForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[Any]:
_lowercase =True
_lowercase =True
_lowercase =LlamaForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# first forward pass
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , )
_lowercase =outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase =ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
_lowercase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowercase =torch.cat([input_mask, next_mask] , dim=-1 )
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['''hidden_states'''][0]
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_lowercase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowercase =output_from_no_past[:, -3:, random_slice_idx].detach()
_lowercase =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def __A (self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
def __A (self ) -> Optional[int]:
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=3_7 )
def __A (self ) -> Dict:
self.config_tester.run_common_tests()
def __A (self ) -> Union[str, Any]:
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A (self ) -> List[Any]:
_lowercase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase =type
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A (self ) -> Union[str, Any]:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =3
_lowercase =input_dict['''input_ids''']
_lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
_lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowercase =LlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A (self ) -> str:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =3
_lowercase ='''single_label_classification'''
_lowercase =input_dict['''input_ids''']
_lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
_lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowercase =LlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A (self ) -> Dict:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =3
_lowercase ='''multi_label_classification'''
_lowercase =input_dict['''input_ids''']
_lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
_lowercase =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowercase =LlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def __A (self ) -> Dict:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __A (self , UpperCAmelCase ) -> Optional[Any]:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =ids_tensor([1, 1_0] , config.vocab_size )
_lowercase =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_lowercase =LlamaModel(UpperCAmelCase )
original_model.to(UpperCAmelCase )
original_model.eval()
_lowercase =original_model(UpperCAmelCase ).last_hidden_state
_lowercase =original_model(UpperCAmelCase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_lowercase ={'''type''': scaling_type, '''factor''': 10.0}
_lowercase =LlamaModel(UpperCAmelCase )
scaled_model.to(UpperCAmelCase )
scaled_model.eval()
_lowercase =scaled_model(UpperCAmelCase ).last_hidden_state
_lowercase =scaled_model(UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __A (self ) -> Tuple:
_lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
_lowercase =model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_lowercase =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __A (self ) -> List[str]:
_lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
_lowercase =model(torch.tensor(UpperCAmelCase ) )
# Expected mean on dim = -1
_lowercase =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __A (self ) -> int:
_lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
_lowercase =model(torch.tensor(UpperCAmelCase ) )
# Expected mean on dim = -1
_lowercase =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def __A (self ) -> Dict:
_lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
_lowercase =model(torch.tensor(UpperCAmelCase ) )
_lowercase =torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
_lowercase =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def __A (self ) -> int:
_lowercase ='''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
_lowercase ='''Simply put, the theory of relativity states that '''
_lowercase =LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
_lowercase =tokenizer.encode(UpperCAmelCase , return_tensors='''pt''' )
_lowercase =LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCAmelCase )
# greedy generation outputs
_lowercase =model.generate(UpperCAmelCase , max_new_tokens=6_4 , top_p=UpperCAmelCase , temperature=1 , do_sample=UpperCAmelCase )
_lowercase =tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
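# Hedged aside (illustrative sketch, not part of the test suite above): the
# parameterized scaling test wires RoPE scaling through the model config, e.g.
def _demo_rope_scaling_config(scaling_type: str = "dynamic") -> LlamaConfig:
    # hypothetical tiny hyper-parameters; only the rope_scaling dict mirrors the test
    config = LlamaConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
    config.rope_scaling = {"type": scaling_type, "factor": 10.0}
    return config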
| 5 |
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS84 ellipsoid constants, in metres
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
RADIUS = 6_37_81_37


def a ( lat_a , lon_a , lat_b , lon_b ):
    '''simple docstring'''
    # flattening of the ellipsoid and the reduced latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_a = atan((1 - flattening) * tan(radians(lat_a ) ) )
    phi_b = atan((1 - flattening) * tan(radians(lat_b ) ) )
    lambda_a = radians(lon_a )
    lambda_b = radians(lon_b )
    # Equation
    sin_sq_phi = sin((phi_b - phi_a) / 2 )
    sin_sq_lambda = sin((lambda_b - lambda_a) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_a ) * cos(phi_b ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
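    # hedged usage sketch (coordinates are illustrative): distance in metres
    # between San Francisco and Yosemite, roughly 254 km
    print(a(37.774856, -122.424227, 37.864742, -119.537521))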
| 207 | 0 |
from __future__ import annotations
def UpperCamelCase ( nums ):
    """simple docstring"""
    if len(nums ) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    # a polygon is possible iff the longest side is shorter than the sum of the rest
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
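    # hedged usage sketch: (3, 4, 5) satisfies the polygon inequality, (1, 1, 3) does not
    print(UpperCamelCase([3, 4, 5]))  # True
    print(UpperCamelCase([1, 1, 3]))  # False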
| 138 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__magic_name__: Union[str, Any] = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __magic_name__ ( cls ) -> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(lowerCAmelCase__ )
@classmethod
def __magic_name__ ( cls ) -> Optional[Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
torch.manual_seed(0 )
__magic_name__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase__ , )
__magic_name__ : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
__magic_name__ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__magic_name__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__magic_name__ : Any = CLIPTextModel(lowerCAmelCase__ )
__magic_name__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__ : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Optional[Any]:
if str(lowerCAmelCase__ ).startswith("""mps""" ):
__magic_name__ : int = torch.manual_seed(lowerCAmelCase__ )
else:
__magic_name__ : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def __magic_name__ ( self ) -> Dict:
__magic_name__ : int = """cpu"""
__magic_name__ : Union[str, Any] = self.get_dummy_components()
__magic_name__ : int = self.pipeline_class(**lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : str = pipe(**lowerCAmelCase__ ).images
__magic_name__ : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__magic_name__ : Dict = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
__magic_name__ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase__ , 1e-3 )
def __magic_name__ ( self ) -> List[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __magic_name__ ( self ) -> Union[str, Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self ) -> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __magic_name__ ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __magic_name__ ( self ) -> Any:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __magic_name__ ( self ) -> Dict:
super().test_save_load_local(expected_max_difference=5e-4 )
def __magic_name__ ( self ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests ( unittest.TestCase ):
@classmethod
def __magic_name__ ( cls ) -> Optional[int]:
super().setUpClass()
torch.use_deterministic_algorithms(lowerCAmelCase__ )
@classmethod
def __magic_name__ ( cls ) -> List[Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : str = torch.manual_seed(51 )
__magic_name__ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
__magic_name__ : List[str] = """a painting of an elephant with glasses"""
__magic_name__ : Any = [5, 7]
__magic_name__ : List[Any] = pipe(
prompt=lowerCAmelCase__ , token_indices=lowerCAmelCase__ , guidance_scale=7.5 , generator=lowerCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
__magic_name__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
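# Hedged usage sketch (not part of the original tests; it mirrors the slow test
# above, and the model id, prompt and token indices are illustrative):
def _demo_attend_and_excite():
    pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
    # token_indices selects the prompt tokens ("cat", "frog") whose
    # cross-attention maps are maximised during denoising
    return pipe("a cat and a frog", token_indices=[2, 5], num_inference_steps=50).images[0]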
| 138 | 1 |
def __lowerCamelCase ( density , bulk_modulus ) -> float:
    """simple docstring"""
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    # Newton-Laplace equation: speed of sound = sqrt(bulk modulus / density)
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
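    # hedged usage sketch: textbook values for water (density ~998 kg/m^3,
    # bulk modulus ~2.15e9 Pa) give roughly 1467 m/s
    print(__lowerCamelCase(998, 2.15e9))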
| 306 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash :
    def __init__( self: List[str] , UpperCAmelCase_: bytes ):
        '''simple docstring'''
        self.data = UpperCAmelCase_
        # initial hash state, per the SHA-1 specification
        self.h = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0]

    @staticmethod
    def rotate( n: int , b: int ):
        '''left-rotate the 32-bit integer n by b bits'''
        return ((n << b) | (n >> (32 - b))) & 0xff_fff_fff

    def padding( self: Any ):
        '''pad the message with 0x80, zeros, and its bit length as a 64-bit integer'''
        padding = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
        return padded_data

    def split_blocks( self: List[Any] ):
        '''split the padded message into 64-byte blocks'''
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]

    def expand_block( self: Optional[Any] , UpperCAmelCase_: bytes ):
        '''expand a 64-byte block into eighty 32-bit words'''
        w = list(struct.unpack(""">16L""" , UpperCAmelCase_ ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w

    def final_hash( self: List[str] ):
        '''simple docstring'''
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a, b, c, d, e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5a_827_999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6e_d9e_ba1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8f_1bb_cdc
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xca_62c_1d6
                a, b, c, d, e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xff_fff_fff,
                self.h[1] + b & 0xff_fff_fff,
                self.h[2] + c & 0xff_fff_fff,
                self.h[3] + d & 0xff_fff_fff,
                self.h[4] + e & 0xff_fff_fff,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_shaa_hash ( ) -> None:
    """simple docstring"""
    msg = b"""Test String"""
    assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324


def main ( ) -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Process some strings or files""" )
    parser.add_argument(
        """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""" )
    print(SHAaHash(hash_input ).final_hash() )


if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
| 306 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
a : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __UpperCamelCase ( Pipeline ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
requires_backends(self , "vision" )
self.check_model_type(lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , **lowerCAmelCase__ ) -> Any:
return {}, {}, {}
def __a ( self , lowerCAmelCase__ ) -> Tuple:
a : Union[str, Any] = load_image(lowerCAmelCase__ )
a : List[Any] = image.size
a : Any = self.image_processor(images=lowerCAmelCase__ , return_tensors=self.framework )
return model_inputs
def __a ( self , lowerCAmelCase__ ) -> Dict:
a : List[str] = self.model(**lowerCAmelCase__ )
return model_outputs
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
a : Optional[int] = model_outputs.predicted_depth
a : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=lowerCAmelCase__ )
a : str = prediction.squeeze().cpu().numpy()
a : Any = (output * 255 / np.max(lowerCAmelCase__ )).astype("uint8" )
a : str = Image.fromarray(lowerCAmelCase__ )
a : Dict = {}
a : str = predicted_depth
a : Optional[Any] = depth
return output_dict
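# Hedged usage sketch (standalone; model id and image URL are illustrative):
def _demo_depth_estimation():
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # `depth` is a PIL image, `predicted_depth` the raw tensor (see postprocess above)
    return result["depth"], result["predicted_depth"]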
| 79 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_fnet'''] = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_fnet_fast'''] = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_fnet'''] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
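# Hedged aside (standalone sketch, not part of this module): the `_LazyModule`
# registered above defers the heavy framework imports until an attribute is
# first accessed; the core of that pattern looks roughly like this.
import importlib
import types


class _DemoLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._demo_import_structure = import_structure

    def __getattr__(self, attr: str):
        # resolve the submodule that exports `attr`, importing it on demand
        for submodule, symbols in self._demo_import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")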
| 79 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def A ( a_ ,a_=0.999 ,a_="cosine" ,) -> Optional[Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(a_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__UpperCamelCase : str =[]
for i in range(a_ ):
__UpperCamelCase : Optional[int] =i / num_diffusion_timesteps
__UpperCamelCase : Optional[Any] =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a_ ) / alpha_bar_fn(a_ ) ,a_ ) )
return torch.tensor(a_ ,dtype=torch.floataa )
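# Hedged aside (standalone, not from the original file): a plain-Python view of
# the cosine schedule above -- each beta is the relative one-step drop in the
# cumulative alpha_bar, clipped at max_beta.
def _demo_cosine_betas(num_steps: int = 4, max_beta: float = 0.999) -> list:
    def bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return [min(1 - bar((i + 1) / num_steps) / bar(i / num_steps), max_beta) for i in range(num_steps)]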
class __A ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
UpperCamelCase__ : Tuple =[e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase__ : Optional[Any] =2
@register_to_config
def __init__( self , lowerCamelCase__ = 1000 , lowerCamelCase__ = 0.00_085 , lowerCamelCase__ = 0.012 , lowerCamelCase__ = "linear" , lowerCamelCase__ = None , lowerCamelCase__ = "epsilon" , lowerCamelCase__ = "linspace" , lowerCamelCase__ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
__UpperCamelCase : Optional[int] =torch.tensor(lowerCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__UpperCamelCase : Tuple =torch.linspace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__UpperCamelCase : int =(
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__UpperCamelCase : Dict =betas_for_alpha_bar(lowerCamelCase__ )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
__UpperCamelCase : Dict =1.0 - self.betas
__UpperCamelCase : List[Any] =torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
if schedule_timesteps is None:
__UpperCamelCase : Dict =self.timesteps
__UpperCamelCase : List[Any] =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__UpperCamelCase : List[str] =1 if len(lowerCamelCase__ ) > 1 else 0
else:
__UpperCamelCase : int =timestep.cpu().item() if torch.is_tensor(lowerCamelCase__ ) else timestep
__UpperCamelCase : Optional[Any] =self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.index_for_timestep(lowerCamelCase__ )
if self.state_in_first_order:
__UpperCamelCase : str =self.sigmas[step_index]
else:
__UpperCamelCase : Optional[int] =self.sigmas_interpol[step_index]
__UpperCamelCase : Any =sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
"""simple docstring"""
__UpperCamelCase : Dict =num_inference_steps
__UpperCamelCase : Optional[int] =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__UpperCamelCase : Optional[int] =np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase__ , dtype=lowerCamelCase__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__UpperCamelCase : int =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCamelCase : List[Any] =(np.arange(0 , lowerCamelCase__ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__UpperCamelCase : List[str] =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCamelCase : Optional[int] =(np.arange(lowerCamelCase__ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase__ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__UpperCamelCase : Any =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__UpperCamelCase : Optional[Any] =torch.from_numpy(np.log(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : Any =np.interp(lowerCamelCase__ , np.arange(0 , len(lowerCamelCase__ ) ) , lowerCamelCase__ )
__UpperCamelCase : List[Any] =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__UpperCamelCase : Dict =torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ )
# interpolate sigmas
__UpperCamelCase : Tuple =sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__UpperCamelCase : int =torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__UpperCamelCase : Optional[Any] =torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCamelCase__ ).startswith('mps' ):
# mps does not support float64
__UpperCamelCase : Tuple =torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ , dtype=torch.floataa )
else:
__UpperCamelCase : List[str] =torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ )
# interpolate timesteps
__UpperCamelCase : Union[str, Any] =self.sigma_to_t(lowerCamelCase__ ).to(lowerCamelCase__ , dtype=timesteps.dtype )
__UpperCamelCase : List[Any] =torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__UpperCamelCase : List[Any] =torch.cat([timesteps[:1], interleaved_timesteps] )
__UpperCamelCase : Optional[int] =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__UpperCamelCase : List[Any] =defaultdict(lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Dict =sigma.log()
# get distribution
__UpperCamelCase : Optional[int] =log_sigma - self.log_sigmas[:, None]
# get sigmas range
__UpperCamelCase : List[str] =dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__UpperCamelCase : List[Any] =low_idx + 1
__UpperCamelCase : int =self.log_sigmas[low_idx]
__UpperCamelCase : Tuple =self.log_sigmas[high_idx]
# interpolate sigmas
__UpperCamelCase : str =(low - log_sigma) / (low - high)
__UpperCamelCase : List[Any] =w.clamp(0 , 1 )
# transform interpolation to time range
__UpperCamelCase : List[str] =(1 - w) * low_idx + w * high_idx
__UpperCamelCase : Dict =t.view(sigma.shape )
return t
@property
def __lowercase ( self ):
"""simple docstring"""
return self.sample is None
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ):
"""simple docstring"""
__UpperCamelCase : int =self.index_for_timestep(lowerCamelCase__ )
# advance index counter by 1
__UpperCamelCase : str =timestep.cpu().item() if torch.is_tensor(lowerCamelCase__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__UpperCamelCase : List[str] =self.sigmas[step_index]
__UpperCamelCase : str =self.sigmas_interpol[step_index + 1]
__UpperCamelCase : Any =self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__UpperCamelCase : int =self.sigmas[step_index - 1]
__UpperCamelCase : Tuple =self.sigmas_interpol[step_index]
__UpperCamelCase : List[Any] =self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__UpperCamelCase : Any =0
__UpperCamelCase : Tuple =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__UpperCamelCase : List[str] =sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCamelCase : Union[str, Any] =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__UpperCamelCase : Any =sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCamelCase : Optional[int] =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__UpperCamelCase : List[Any] =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__UpperCamelCase : List[str] =sigma_interpol - sigma_hat
# store for 2nd order step
__UpperCamelCase : Tuple =sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__UpperCamelCase : Optional[Any] =(sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__UpperCamelCase : Optional[Any] =sigma_next - sigma_hat
__UpperCamelCase : List[Any] =self.sample
__UpperCamelCase : int =None
__UpperCamelCase : Union[str, Any] =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : int =self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase__ ):
# mps does not support float64
__UpperCamelCase : int =self.timesteps.to(original_samples.device , dtype=torch.floataa )
__UpperCamelCase : Union[str, Any] =timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__UpperCamelCase : Optional[Any] =self.timesteps.to(original_samples.device )
__UpperCamelCase : Tuple =timesteps.to(original_samples.device )
__UpperCamelCase : Any =[self.index_for_timestep(lowerCamelCase__ , lowerCamelCase__ ) for t in timesteps]
__UpperCamelCase : List[Any] =sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__UpperCamelCase : int =sigma.unsqueeze(-1 )
__UpperCamelCase : Dict =original_samples + noise * sigma
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
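# Hedged usage sketch (standalone, not from the original file): the class above
# mirrors diffusers' KDPM2 (DPM-Solver-2) discrete scheduler, whose public API
# is used like this in a denoising loop (`unet` stands in for any noise model):
def _demo_scheduler_loop(unet, sample, num_inference_steps=20):
    from diffusers import KDPM2DiscreteScheduler

    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = unet(model_input, t)
        # each step() is either the 1st-order Euler step or the 2nd-order
        # correction, depending on the scheduler's internal state
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample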
| 71 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for a candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
            raise ValueError(_WARNING )

        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.' )

        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()  # how many candidates were evaluated per task
            n_samples = 0
            results = defaultdict(list )

            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result) )

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )

        ks = k
        pass_at_k = {f'pass@{k}': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k ( num_samples , num_correct , k ) -> np.ndarray:
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int , c: int , k: int ) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )

    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )

    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
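# Hedged worked check (illustrative numbers, not from the original module):
# with n=5 samples and c=2 correct, pass@1 = 1 - (3/4) * (4/5) = 0.4, i.e.
# simply c/n for k=1.
if __name__ == "__main__":
    assert abs(float(estimate_pass_at_k([5], [2], 1)[0]) - 0.4) < 1e-9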
| 71 | 1 |
def lowerCAmelCase_ ( _snake_case : list ) -> float:
'''simple docstring'''
    files = _snake_case
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
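    # hedged usage sketch: merging files of sizes [2, 3, 4] optimally costs
    # (2 + 3) + (5 + 4) = 14
    print(lowerCAmelCase_([2, 3, 4]))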
| 41 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys ( name : str ) -> str:
    '''simple docstring'''
    if "emb" in name:
        name = name.replace("emb" , "model.decoder.embed_tokens" )
    if "transformer" in name:
        name = name.replace("transformer" , "model.decoder" )
    if "cross_attention" in name:
        name = name.replace("cross_attention" , "encoder_attn" )
    if "linear1" in name:
        name = name.replace("linear1" , "fc1" )
    if "linear2" in name:
        name = name.replace("linear2" , "fc2" )
    if "norm1" in name:
        name = name.replace("norm1" , "self_attn_layer_norm" )
    if "norm_cross" in name:
        name = name.replace("norm_cross" , "encoder_attn_layer_norm" )
    if "norm2" in name:
        name = name.replace("norm2" , "final_layer_norm" )
    if "out_norm" in name:
        name = name.replace("out_norm" , "model.decoder.layer_norm" )
    if "linears" in name:
        name = name.replace("linears" , "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
    return name
def rename_state_dict ( state_dict : OrderedDict , hidden_size : int ) -> Tuple[Dict, Dict]:
    '''simple docstring'''
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint ( checkpoint : str ) -> MusicgenDecoderConfig:
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint ( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    '''simple docstring'''
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("t5-base" )
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
    if len(unexpected_keys ) > 0:
        raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
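# Hedged example invocation (script name and paths are illustrative, not from
# the original):
#
#     python convert_musicgen_transformers.py --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small --device cpu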
| 41 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : str = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : Optional[Any]="<unk>" , lowerCAmelCase_ : Dict="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type"""))
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**lowerCAmelCase_)
lowercase_ = add_prefix_space
lowercase_ = """post_processor"""
lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["""sep"""])
if "cls" in state:
lowercase_ = tuple(state["""cls"""])
lowercase_ = False
if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type"""))
lowercase_ = component_class(**lowerCAmelCase_)
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
    @property
    def mask_token(self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
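
# A minimal usage sketch for the fast tokenizer above (requires Hub access; the
# comments show RoBERTa's special-tokens layout as produced by
# build_inputs_with_special_tokens):
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   single = tokenizer("Hello world")["input_ids"]   # <s> Hello world </s>
#   pair = tokenizer("Hello", "world")["input_ids"]  # <s> Hello </s></s> world </s>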
| 136 |
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, xa: float) -> list:
    """
    Interpolate and evaluate a polynomial at the point `xa` using Neville's method.
    Returns the interpolated value and the full table of intermediate values.

    >>> neville_interpolate((1, 2, 3, 4, 6), (6, 7, 8, 9, 11), 5)[0]
    10.0
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """simple docstring"""

    def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", final_dropout: bool = False, ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
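
# A self-contained sketch of the feed-forward chunking performed in the forward pass
# above (illustrative only; `ff` stands for any module applied position-wise):
def _chunked_feed_forward_sketch(ff, hidden_states, chunk_size, chunk_dim):
    # the chunked dimension must be divisible by chunk_size, mirroring the check above
    if hidden_states.shape[chunk_dim] % chunk_size != 0:
        raise ValueError("chunked dimension must be divisible by chunk_size")
    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    # run the sub-module chunk by chunk and re-assemble, trading throughput for peak memory
    return torch.cat([ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim)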
class FeedForward(nn.Module):
    """simple docstring"""

    def __init__(self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    """simple docstring"""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    """simple docstring"""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    """simple docstring"""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """simple docstring"""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    """simple docstring"""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """simple docstring"""

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 9 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False, ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
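
# A small worked example (hypothetical inputs) of what intersect_and_union returns for
# a single prediction/ground-truth pair with num_labels=2 and ignore_index=255:
#
#   pred = np.array([[0, 1], [1, 1]])
#   gt = np.array([[0, 1], [0, 1]])
#   intersect_and_union(pred, gt, num_labels=2, ignore_index=255)
#   # area_intersect = [1, 2], area_union = [2, 3], area_pred_label = [1, 3], area_label = [2, 2]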
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False, ):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False, ):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                } ), reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ], )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False, ):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
        return iou_result
| 9 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 138 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    '''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 138 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
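
# A minimal usage sketch for the configuration above (constructing a config does not
# download any weights):
#
#   config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#   assert config.model_type == "big_bird"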
| 179 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
        """simple docstring"""
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=lowerCAmelCase_ , segmentation_processor=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """simple docstring"""
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        """simple docstring"""
        # segment the image with CLIPSeg to obtain the inpainting mask from the `text` prompt
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
| 179 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        '''simple docstring'''
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings, ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        '''simple docstring'''
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ):
        '''simple docstring'''
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample
            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
            model_output = self.truncate(model_output, truncation_rate)
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        '''simple docstring'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1, indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
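
# A small numeric walk-through of truncate() above (hypothetical values): for sorted
# per-token probabilities [0.5, 0.3, 0.15, 0.05] and truncation_rate=0.8, the cumulative
# sums are [0.5, 0.8, 0.95, 1.0]; after prepending the always-kept top-1 entry and
# shifting, the first two tokens are kept and the remaining log-probabilities are set
# to -inf (i.e. log(0)).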
| 79 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 79 | 1 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """simple docstring"""
    pattern = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
    return bool(re.search(pattern, phone))
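
# Illustrative checks against the pattern above (these numbers are made up):
#
#   is_sri_lankan_phone_number("+94773283048")   # True
#   is_sri_lankan_phone_number("0718382399")     # True
#   is_sri_lankan_phone_number("1234567890")     # False (no valid prefix)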
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 154 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """simple docstring"""
    # (n * (n + 1) // 2) ** 2 is the square of the sum 1 + 2 + ... + n
    square_of_sum = (n * (n + 1) // 2) ** 2
    # n * (n + 1) * (2n + 1) // 6 is the closed form for 1^2 + 2^2 + ... + n^2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
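
# Worked example: for n = 10 the square of the sum is 55**2 = 3025 and the sum of the
# squares is 385, so solution(10) returns 3025 - 385 = 2640 (the Project Euler 6 example).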
if __name__ == "__main__":
print(F'''{solution() = }''')
| 154 | 1 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
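
# Hand-computed check for the test case above (not in the original file): with
# burst times [2, 5, 3, 7] and all arrivals at 0, jobs run shortest-first
# (P1, P3, P2, P4), giving waiting times [0, 5, 2, 10] and turnaround times
# [2, 10, 5, 17]; average waiting time 4.25, average turnaround time 8.5.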
| 41 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
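
# Example invocation (script name and output folder are illustrative):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small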
| 41 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
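
# Minimal usage sketch (not in the original file; the "speech" column name is
# illustrative):
#   from datasets import Audio, Features
#   template = AutomaticSpeechRecognition(audio_column="speech")
#   template = template.align_with_features(Features({"speech": Audio(sampling_rate=16_000)}))
#   template.column_mapping  # -> {"speech": "audio", "transcription": "transcription"}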
| 124 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc: list) -> list:
    """Cleans the model documentation table of content by removing duplicate
    entries and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite: bool = False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
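
# Example invocation (script path is illustrative):
#   python utils/check_doc_toc.py                      # check only
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the toctree in place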
| 124 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be"
                    f" divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when"
                    " calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
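
# Minimal smoke test for the transformer block above (not in the original file;
# the shapes are illustrative). Runs a self-attention-only block on a random
# sequence and checks that the shape is preserved.
if __name__ == "__main__":
    block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
    sample = torch.randn(2, 128, 64)  # (batch, seq_len, dim)
    out = block(sample)
    assert out.shape == sample.shape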
| 9 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """Stub so the calls below still resolve when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
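
# To run just this module (the test path is illustrative and depends on the
# repository layout):
#   python -m pytest tests/pipelines/test_pipelines_document_question_answering.py -k "small_model"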
| 9 | 1 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic progression with the given first term,
    common difference and number of terms."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
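
# Worked example (not in the original file): sum_of_series(1, 1, 10) is the sum
# 1 + 2 + ... + 10 = 55.0.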
| 371 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort `collection` by repeatedly moving its minimum to the front and its
    maximum to the back."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
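
# Worked example (not in the original file): for [3, 1, 2] the first pass moves
# 1 to `start` and 3 to `end`, leaving [2] in the middle, so the result is [1, 2, 3].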
| 202 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
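
# To run just this module (the test path is illustrative and depends on the
# repository layout):
#   python -m pytest tests/models/beit/test_image_processing_beit.py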
| 179 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Creates a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
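
# Illustrative use of `HfArg` in an argument dataclass (the dataclass, field
# names and values below are made up, not part of the original file):
#   @dataclasses.dataclass
#   class TrainingArgs:
#       lr: float = HfArg(default=3e-4, aliases=["--learning-rate"], help="Learning rate.")
#
#   parser = HfArgumentParser(TrainingArgs)
#   (args,) = parser.parse_args_into_dataclasses(["--learning-rate", "1e-3"])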
class HfArgumentParser(ArgumentParser):
    """This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
if hasattr(__lowerCamelCase , '''_argument_group_name''' ):
__A : Tuple = self.add_argument_group(dtype._argument_group_name )
else:
__A : List[Any] = self
try:
__A : Dict[str, type] = get_type_hints(__lowerCamelCase )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__lowerCamelCase ):
__A : List[str] = '''.'''.join(map(__lowerCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__lowerCamelCase ):
if not field.init:
continue
__A : int = type_hints[field.name]
self._parse_dataclass_field(__lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase=None , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=None , ):
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__A : Tuple = []
if args_filename:
args_files.append(Path(__lowerCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__A : Dict = ArgumentParser()
args_file_parser.add_argument(__lowerCamelCase , type=__lowerCamelCase , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
__A , __A : List[Any] = args_file_parser.parse_known_args(args=__lowerCamelCase )
__A : Dict = vars(__lowerCamelCase ).get(args_file_flag.lstrip('''-''' ) , __lowerCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(__lowerCamelCase ) for p in cmd_args_file_paths] )
__A : Any = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__A : List[Any] = file_args + args if args is not None else file_args + sys.argv[1:]
__A , __A : Tuple = self.parse_known_args(args=__lowerCamelCase )
__A : int = []
for dtype in self.dataclass_types:
__A : List[str] = {f.name for f in dataclasses.fields(__lowerCamelCase ) if f.init}
__A : List[str] = {k: v for k, v in vars(__lowerCamelCase ).items() if k in keys}
for k in keys:
delattr(__lowerCamelCase , __lowerCamelCase )
__A : int = dtype(**__lowerCamelCase )
outputs.append(__lowerCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__lowerCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False ):
'''simple docstring'''
__A : Tuple = set(args.keys() )
__A : Union[str, Any] = []
for dtype in self.dataclass_types:
__A : str = {f.name for f in dataclasses.fields(__lowerCamelCase ) if f.init}
__A : Optional[int] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__A : int = dtype(**__lowerCamelCase )
outputs.append(__lowerCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(__lowerCamelCase )}""" )
return tuple(__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False ):
'''simple docstring'''
with open(Path(__lowerCamelCase ) , encoding='''utf-8''' ) as open_json_file:
__A : List[str] = json.loads(open_json_file.read() )
__A : List[str] = self.parse_dict(__lowerCamelCase , allow_extra_keys=__lowerCamelCase )
return tuple(__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False ):
'''simple docstring'''
__A : Dict = self.parse_dict(yaml.safe_load(Path(__lowerCamelCase ).read_text() ) , allow_extra_keys=__lowerCamelCase )
return tuple(__lowerCamelCase )
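# Example usage of the parser methods above (a minimal sketch written against
# the public `transformers.HfArgumentParser` API; the dataclass and values
# below are illustrative assumptions, not part of this file):
#
#     from dataclasses import dataclass, field
#     from transformers import HfArgumentParser
#
#     @dataclass
#     class TrainingArgs:
#         learning_rate: float = field(default=5e-5)
#         fp16: bool = field(default=True)  # a True bool default also gets a --no_fp16 complement
#
#     parser = HfArgumentParser(TrainingArgs)
#     (args,) = parser.parse_args_into_dataclasses(["--learning_rate", "3e-5", "--no_fp16"])
#     assert args.learning_rate == 3e-5 and args.fp16 is False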
| 179 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Logistic sigmoid, applied element-wise.
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU / swish activation: x * sigmoid(x).
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
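    # Quick numeric check (a sketch; the expected values were computed by hand):
    x = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(x))              # approx. [0.26894142, 0.5, 0.73105858]
    print(sigmoid_linear_unit(x))  # approx. [-0.26894142, 0.0, 0.73105858]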
| 367 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Any = "ybelkada/fonts"
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
'Pix2StructImageProcessor. Please upgrade torch.' )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ):
'''simple docstring'''
requires_backends(lowercase , ['torch'] )
_check_torch_version()
lowerCamelCase_ = image_tensor.unsqueeze(0 )
lowerCamelCase_ = torch.nn.functional.unfold(lowercase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
lowerCamelCase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , lowercase , lowercase , -1 )
lowerCamelCase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : int = 36 , lowercase : str = "black" , lowercase : str = "white" , lowercase : int = 5 , lowercase : int = 5 , lowercase : int = 5 , lowercase : int = 5 , lowercase : Optional[bytes] = None , lowercase : Optional[str] = None , ):
'''simple docstring'''
requires_backends(lowercase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
lowerCamelCase_ = textwrap.TextWrapper(width=80 )
lowerCamelCase_ = wrapper.wrap(text=lowercase )
lowerCamelCase_ = '\n'.join(lowercase )
if font_bytes is not None and font_path is None:
lowerCamelCase_ = io.BytesIO(lowercase )
elif font_path is not None:
lowerCamelCase_ = font_path
else:
lowerCamelCase_ = hf_hub_download(lowercase , 'Arial.TTF' )
lowerCamelCase_ = ImageFont.truetype(lowercase , encoding='UTF-8' , size=lowercase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowerCamelCase_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , lowercase ) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = temp_draw.textbbox((0, 0) , lowercase , lowercase )
# Create the actual image with a bit of padding around the text.
lowerCamelCase_ = text_width + left_padding + right_padding
lowerCamelCase_ = text_height + top_padding + bottom_padding
lowerCamelCase_ = Image.new('RGB' , (image_width, image_height) , lowercase )
lowerCamelCase_ = ImageDraw.Draw(lowercase )
draw.text(xy=(left_padding, top_padding) , text=lowercase , fill=lowercase , font=lowercase )
return image
def _SCREAMING_SNAKE_CASE ( lowercase : np.ndarray , lowercase : str , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(lowercase , 'vision' )
# Convert to PIL image if necessary
lowerCamelCase_ = to_pil_image(lowercase )
lowerCamelCase_ = render_text(lowercase , **lowercase )
lowerCamelCase_ = max(header_image.width , image.width )
lowerCamelCase_ = int(image.height * (new_width / image.width) )
lowerCamelCase_ = int(header_image.height * (new_width / header_image.width) )
lowerCamelCase_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
lowerCamelCase_ = to_numpy_array(lowercase )
if infer_channel_dimension_format(lowercase ) == ChannelDimension.LAST:
lowerCamelCase_ = to_channel_dimension_format(lowercase , ChannelDimension.LAST )
return new_image
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''flattened_patches''']
def __init__( self : Dict , A_ : bool = True , A_ : bool = True , A_ : Dict[str, int] = None , A_ : int = 2048 , A_ : bool = False , **A_ : str , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
lowerCamelCase_ = do_normalize
lowerCamelCase_ = do_convert_rgb
lowerCamelCase_ = max_patches
lowerCamelCase_ = is_vqa
def a__ ( self : Union[str, Any] , A_ : np.ndarray , A_ : int , A_ : dict , **A_ : Any ) -> np.ndarray:
"""simple docstring"""
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
lowerCamelCase_ = to_channel_dimension_format(A_ , ChannelDimension.FIRST )
lowerCamelCase_ = torch.from_numpy(A_ )
lowerCamelCase_ , lowerCamelCase_ = patch_size['height'], patch_size['width']
lowerCamelCase_ , lowerCamelCase_ = get_image_size(A_ )
# maximize scale s.t.
lowerCamelCase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowerCamelCase_ = max(min(math.floor(scale * image_height / patch_height ) , A_ ) , 1 )
lowerCamelCase_ = max(min(math.floor(scale * image_width / patch_width ) , A_ ) , 1 )
lowerCamelCase_ = max(num_feasible_rows * patch_height , 1 )
lowerCamelCase_ = max(num_feasible_cols * patch_width , 1 )
lowerCamelCase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=A_ , antialias=A_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowerCamelCase_ = torch_extract_patches(A_ , A_ , A_ )
lowerCamelCase_ = patches.shape
lowerCamelCase_ = patches_shape[1]
lowerCamelCase_ = patches_shape[2]
lowerCamelCase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowerCamelCase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowerCamelCase_ = torch.arange(A_ ).reshape([rows, 1] ).repeat(1 , A_ ).reshape([rows * columns, 1] )
lowerCamelCase_ = torch.arange(A_ ).reshape([1, columns] ).repeat(A_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowerCamelCase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowerCamelCase_ = torch.nn.functional.pad(A_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowerCamelCase_ = to_numpy_array(A_ )
return result
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str ) -> np.ndarray:
"""simple docstring"""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
# take mean across the whole `image`
lowerCamelCase_ = np.mean(A_ )
lowerCamelCase_ = np.std(A_ )
lowerCamelCase_ = max(A_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(A_ , mean=A_ , std=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : ImageInput , A_ : Optional[str] = None , A_ : bool = None , A_ : Optional[bool] = None , A_ : Optional[int] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : Optional[int] , ) -> ImageInput:
"""simple docstring"""
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ = patch_size if patch_size is not None else self.patch_size
lowerCamelCase_ = max_patches if max_patches is not None else self.max_patches
lowerCamelCase_ = self.is_vqa
if kwargs.get('data_format' , A_ ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ = [convert_to_rgb(A_ ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
lowerCamelCase_ = kwargs.pop('font_bytes' , A_ )
lowerCamelCase_ = kwargs.pop('font_path' , A_ )
if isinstance(A_ , A_ ):
lowerCamelCase_ = [header_text] * len(A_ )
lowerCamelCase_ = [
render_header(A_ , header_text[i] , font_bytes=A_ , font_path=A_ )
for i, image in enumerate(A_ )
]
if do_normalize:
lowerCamelCase_ = [self.normalize(image=A_ ) for image in images]
# convert to torch tensor and permute
lowerCamelCase_ = [
self.extract_flattened_patches(image=A_ , max_patches=A_ , patch_size=A_ )
for image in images
]
# create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
lowerCamelCase_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=A_ )
return encoded_outputs
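# Example usage (a sketch against the public `transformers` API, where this
# processor is published as `Pix2StructImageProcessor`; the file name is a
# hypothetical placeholder):
#
#     from PIL import Image
#     from transformers import Pix2StructImageProcessor
#
#     processor = Pix2StructImageProcessor(max_patches=1024)
#     image = Image.open("example.png")
#     inputs = processor(images=image, return_tensors="pt")
#     # Each patch row is (row_id, col_id, flattened 16x16x3 patch) -> 770 values
#     print(inputs.flattened_patches.shape)  # torch.Size([1, 1024, 770])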
| 208 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    # A fourth-order Improved Pseudo Linear Multi-Step (F-PNDM) scheduler.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multi-step (Adams-Bashforth style) combination, up to fourth order.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
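# Example denoising loop with the scheduler above (a sketch; `model` stands in
# for an arbitrary noise-prediction network and is a hypothetical name):
#
#     scheduler = IPNDMScheduler()
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)  # hypothetical denoiser call
#         sample = scheduler.step(model_output, t, sample).prev_sample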
| 154 |
def topological_sort(graph: dict) -> None:
    # Kahn's algorithm: repeatedly emit vertices whose indegree has dropped to 0.
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
__A : List[Any] = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
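# For the acyclic adjacency list above, this prints [0, 1, 2, 3, 4, 5].
# A cyclic input takes the other branch, e.g.:
#
#     topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"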
| 154 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # The kernel size must be odd so the filter has a well-defined center.
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # fill in each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate the coordinates into the filter's orientation
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # Gaussian envelope multiplied by a sinusoidal carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
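# Sanity check for the kernel builder (a sketch; the expectations follow from
# the code above, where an even ksize is bumped to the next odd size):
#
#     kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
#     assert kernel.shape == (11, 11)
#     assert kernel[5, 5] == 1.0  # center pixel: exp(0) * cos(0)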
| 133 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
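# Worked example, reasoned by hand rather than produced by running jiwer:
# comparing the prediction "hella" to the reference "hello" needs one
# substitution over five reference characters, so CER = 1 / 5 = 0.2:
#
#     cer = datasets.load_metric("cer")
#     print(cer.compute(predictions=["hella"], references=["hello"]))  # 0.2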
| 133 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 124 |
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers have the form k * 2**n + 1; after 3 and 5 they arrive
        # in blocks of doubling size, which this loop generates.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
    value = 0
    try:
        value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 124 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self: Tuple ) -> Union[str, Any]:
return self.pa_type
def A__ ( self: Optional[Any] ,lowerCamelCase_: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = np.array(lowerCamelCase_ )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowerCamelCase_ ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowerCamelCase_ )
elif isinstance(lowerCamelCase_ ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowerCamelCase_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def A__ ( self: List[str] ,lowerCamelCase_: dict ,lowerCamelCase_: Union[str, Any]=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowerCamelCase_ ):
UpperCAmelCase_ : Any = PIL.Image.open(lowerCamelCase_ )
else:
UpperCAmelCase_ : int = path.split("""::""" )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(lowerCamelCase_ ,config.HUB_DATASETS_URL )["""repo_id"""]
UpperCAmelCase_ : str = token_per_repo_id.get(lowerCamelCase_ )
except ValueError:
UpperCAmelCase_ : Optional[int] = None
with xopen(lowerCamelCase_ ,"""rb""" ,use_auth_token=lowerCamelCase_ ) as f:
UpperCAmelCase_ : Optional[int] = BytesIO(f.read() )
UpperCAmelCase_ : List[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : Dict = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def A__ ( self: List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def A__ ( self: Tuple ,lowerCamelCase_: Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Tuple = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.binary() )
UpperCAmelCase_ : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : int = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.string() )
UpperCAmelCase_ : List[Any] = pa.StructArray.from_arrays([storage, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
UpperCAmelCase_ : str = storage.field("""bytes""" )
else:
UpperCAmelCase_ : Dict = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
UpperCAmelCase_ : Optional[int] = storage.field("""path""" )
else:
UpperCAmelCase_ : Optional[int] = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : str = pa.array(
[encode_np_array(np.array(lowerCamelCase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Tuple = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.string() )
UpperCAmelCase_ : int = pa.StructArray.from_arrays(
[bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() )
return array_cast(lowerCamelCase_ ,self.pa_type )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowerCamelCase_: Union[str, Any] ):
with xopen(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : str = f.read()
return bytes_
UpperCAmelCase_ : str = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(lowerCamelCase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() )
return array_cast(lowerCamelCase_ ,self.pa_type )
def lowerCamelCase_ ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase_ ( _a : "PIL.Image.Image" ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ : Union[str, Any] = image.format
else:
UpperCAmelCase_ : Optional[int] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(_a , format=_a )
return buffer.getvalue()
def lowerCamelCase_ ( _a : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(_a , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_a )}
def lowerCamelCase_ ( _a : np.ndarray ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
UpperCAmelCase_ : Optional[Any] = array.dtype
UpperCAmelCase_ : Optional[Any] = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
UpperCAmelCase_ : int = dtype.kind
UpperCAmelCase_ : Tuple = dtype.itemsize
UpperCAmelCase_ : List[str] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ : Tuple = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ : List[Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ : List[Any] = dtype_byteorder + dtype_kind + str(_a )
UpperCAmelCase_ : Union[str, Any] = np.dtype(_a )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCAmelCase_ : int = PIL.Image.fromarray(array.astype(_a ) )
return {"path": None, "bytes": image_to_bytes(_a )}
def lowerCamelCase_ ( _a : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = first_non_null_value(_a )
if isinstance(_a , _a ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_a , np.ndarray ):
UpperCAmelCase_ : Dict = no_op_if_value_is_null(_a )
return [obj_to_image_dict_func(_a ) for obj in objs]
elif isinstance(_a , PIL.Image.Image ):
UpperCAmelCase_ : Optional[int] = no_op_if_value_is_null(_a )
return [obj_to_image_dict_func(_a ) for obj in objs]
else:
return objs
else:
return objs
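# Example usage of the feature above (a sketch against the public `datasets`
# API; the image path is a hypothetical placeholder):
#
#     from datasets import Dataset, Image
#
#     ds = Dataset.from_dict({"image": ["path/to/cat.png"]})
#     ds = ds.cast_column("image", Image())  # column now decodes to PIL images
#     pil_image = ds[0]["image"]
#
# With Image(decode=False), the column instead yields the raw
# {"bytes": ..., "path": ...} dicts that encoding produces.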
| 59 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        # Color vertex v with c and its unvisited neighbours with 1 - c.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # The graph is bipartite iff no edge joins two equally colored vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
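# The even cycle above is 2-colorable, so this prints True. An odd cycle has
# no valid 2-coloring:
#
#     print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False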
| 59 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_UpperCamelCase : Tuple = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_UpperCamelCase : Tuple = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_UpperCamelCase : List[Any] = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
return {"mse": mse}
| 304 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int) -> set[int]:
    # Return the distinct products over all prime partitions of the input.
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    # Return the first value that can be written as a sum of primes in more
    # than `number_unique_partitions` ways.
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
| 202 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
"""hidden_size""": """emb_dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
"""n_words""": """vocab_size""", # For backward compatibility
}
    def __init__(
        self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1,
        gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True,
        max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02,
        bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first",
        summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1,
        start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
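# Example usage (a sketch against the public `transformers` API):
#
#     from transformers import XLMConfig, XLMModel
#
#     config = XLMConfig(emb_dim=1024, n_layers=6)  # override a few defaults
#     model = XLMModel(config)
#     assert model.config.hidden_size == 1024  # resolved via the attribute_map alias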
| 334 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict:
for attribute in key.split('''.''' ):
A: Union[str, Any] = getattr(__lowercase , __lowercase )
if weight_type is not None:
A: Tuple = getattr(__lowercase , __lowercase ).shape
else:
A: str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
A: Dict = value
elif weight_type == "weight_g":
A: Tuple = value
elif weight_type == "weight_v":
A: Any = value
elif weight_type == "bias":
A: str = value
elif weight_type == "running_mean":
A: List[Any] = value
elif weight_type == "running_var":
A: Dict = value
elif weight_type == "num_batches_tracked":
A: List[str] = value
elif weight_type == "weight_ih_l0":
A: Dict = value
elif weight_type == "weight_hh_l0":
A: Optional[int] = value
elif weight_type == "bias_ih_l0":
A: List[Any] = value
elif weight_type == "bias_hh_l0":
A: str = value
elif weight_type == "weight_ih_l1":
A: Optional[int] = value
elif weight_type == "weight_hh_l1":
A: int = value
elif weight_type == "bias_ih_l1":
A: Optional[Any] = value
elif weight_type == "bias_hh_l1":
A: str = value
else:
A: Optional[int] = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple:
A: Any = []
if model_name == "encodec_24khz" or "encodec_32khz":
A: List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A: List[Any] = MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(__lowercase , __lowercase ):
logger.info(F"""{name} was ignored""" )
continue
A: Optional[int] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
            prefix , suffix = key.split('''.*.''' )
if prefix in name and suffix in name:
A: str = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
A: Optional[Any] = True
if "*" in mapped_key:
A: Any = name.split(__lowercase )[0].split('''.''' )[-2]
A: Tuple = mapped_key.replace('''*''' , __lowercase )
if "weight_g" in name:
A: str = '''weight_g'''
elif "weight_v" in name:
A: List[Any] = '''weight_v'''
elif "weight_ih_l0" in name:
A: Dict = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
A: int = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
A: Union[str, Any] = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
A: Tuple = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
A: int = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
A: Optional[Any] = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
A: Dict = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
A: str = '''bias_hh_l1'''
elif "bias" in name:
A: Union[str, Any] = '''bias'''
elif "weight" in name:
A: Dict = '''weight'''
elif "running_mean" in name:
A: Tuple = '''running_mean'''
elif "running_var" in name:
A: Any = '''running_var'''
elif "num_batches_tracked" in name:
A: str = '''num_batches_tracked'''
else:
A: Tuple = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ) -> Dict:
if config_path is not None:
A: Tuple = EncodecConfig.from_pretrained(__lowercase )
else:
A: Union[str, Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A: Union[str, Any] = [8, 5, 4, 4]
A: Dict = [2.2]
A: List[Any] = 6_4
A: Optional[Any] = 3_2_0_0_0
A: List[Any] = 2_0_4_8
A: Optional[Any] = False
A: int = False
A: Union[str, Any] = False
elif model_name == "encodec_48khz":
A: Optional[int] = [8, 5, 4, 2]
A: List[Any] = [3.0, 6.0, 1_2.0, 2_4.0]
A: List[Any] = 4_8_0_0_0
A: int = 2
A: List[Any] = False
A: Any = '''time_group_norm'''
A: Optional[Any] = True
A: Any = 1.0
A: Any = 0.0_1
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
A: str = EncodecModel(__lowercase )
A: Optional[Any] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__lowercase )
A: Union[str, Any] = torch.load(__lowercase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A: Optional[int] = original_checkpoint['''best_state''']
recursively_load_weights(__lowercase , __lowercase , __lowercase )
model.save_pretrained(__lowercase )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__lowercase )
model.push_to_hub(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
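# Editor's note (added; not in the original file): an illustrative invocation
# with hypothetical paths. The flag names come from the argparse setup above;
# the script filename is an assumption.
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf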
| 334 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 298 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def a_ ( *args ,take_from=None ,standard_warn=True ,stacklevel=2 ):
    # Reconstructed so the parameter and variable names match their use sites;
    # the obfuscated version reused one name for every parameter and target.
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] ,tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                F' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs ,dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs ,attribute ):
            values += (getattr(deprecated_kwargs ,attribute ),)
            warning = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message ,FutureWarning ,stacklevel=stacklevel )
    if isinstance(deprecated_kwargs ,dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
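# Editor's sketch (added; not in the original file): two typical call patterns
# for the helper above, with hypothetical argument names.
#
#   # consume-and-warn for a deprecated keyword argument:
#   scale = a_('scale', '1.0.0', 'Use `guidance_scale` instead.', take_from=kwargs)
#
#   # plain deprecation notice, nothing to return:
#   a_(('old_fn', '1.0.0', 'Use `new_fn` instead.'))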
| 208 | 0 |
"""simple docstring"""
def lowerCAmelCase (n_term : str ):
    """simple docstring"""
    if n_term == "":
        return []
    series =[]
    for temp in range(int(n_term ) ):
        series.append(F"""1/{temp + 1}""" if series else '''1''' )
    return series
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
    print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
    print(lowerCAmelCase(nth_term))
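# Editor's self-check (added; not in the original script): the builder above
# returns unit fractions as strings.
assert lowerCAmelCase("3") == ["1", "1/2", "1/3"]
assert lowerCAmelCase("") == []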
| 365 |
"""simple docstring"""
import os
from pathlib import Path
def lowerCAmelCase ():
"""simple docstring"""
from torch.utils.cpp_extension import load
    root =Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files =[
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
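# Editor's note (added): a hypothetical call-site guard, since building the
# extension requires a CUDA toolchain and can fail at load time:
#
#   try:
#       MSDA = lowerCAmelCase()
#   except Exception:
#       MSDA = None  # fall back to a pure-PyTorch code path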
| 85 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : torch.FloatTensor
snake_case_ : torch.FloatTensor
snake_case_ : Optional[torch.FloatTensor] = None
class __lowerCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case_ : Union[str, Any] = 2
@register_to_config
def __init__( self : List[str] , snake_case__ : float = 0.02 , snake_case__ : float = 100 , snake_case__ : float = 1.007 , snake_case__ : float = 80 , snake_case__ : float = 0.05 , snake_case__ : float = 50 , ):
"""simple docstring"""
_UpperCAmelCase = sigma_max
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None # sigma(t_i)
def UpperCamelCase ( self : int , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None ):
"""simple docstring"""
return sample
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Union[str, torch.device] = None ):
"""simple docstring"""
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = np.arange(0 , self.num_inference_steps )[::-1].copy()
_UpperCAmelCase = torch.from_numpy(snake_case__ ).to(snake_case__ )
_UpperCAmelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_UpperCAmelCase = torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : torch.FloatTensor , snake_case__ : float , snake_case__ : Optional[torch.Generator] = None ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
_UpperCAmelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_UpperCAmelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
_UpperCAmelCase = self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
_UpperCAmelCase = sigma + gamma * sigma
_UpperCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : float , snake_case__ : float , snake_case__ : torch.FloatTensor , snake_case__ : bool = True , ):
"""simple docstring"""
_UpperCAmelCase = sample_hat + sigma_hat * model_output
_UpperCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
_UpperCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : torch.FloatTensor , snake_case__ : float , snake_case__ : float , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : bool = True , ):
"""simple docstring"""
_UpperCAmelCase = sample_prev + sigma_prev * model_output
_UpperCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
_UpperCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str] ):
"""simple docstring"""
raise NotImplementedError()
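# Editor's sketch (added) of the sampling loop this scheduler is built for,
# written with the upstream diffusers method names (set_timesteps /
# add_noise_to_input / step / step_correct). The obfuscation above collapses
# those methods to a single name, so this is illustrative only:
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = randn_tensor(shape, generator=generator) * scheduler.config.sigma_max
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#       model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
#       output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:
#           model_output = (sigma_prev / 2) * model((output.prev_sample + 1) / 2, sigma_prev / 2).sample
#           output = scheduler.step_correct(model_output, sigma_hat, sigma_prev, sample_hat,
#                                           output.prev_sample, output.derivative)
#       sample = output.prev_sample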
| 133 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ : str = logging.get_logger(__name__)
lowercase_ : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase_ : Optional[Any] = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
lowercase_ : int = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : Optional[int] = VOCAB_FILES_NAMES
snake_case_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : Tuple = ["input_ids", "attention_mask"]
snake_case_ : str = GPTaTokenizer
def __init__( self : List[str] , snake_case__ : Any=None , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]="<|endoftext|>" , snake_case__ : str="<|endoftext|>" , snake_case__ : Optional[int]="<|endoftext|>" , snake_case__ : str=False , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
_UpperCAmelCase = kwargs.pop("add_bos_token" , snake_case__ )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
_UpperCAmelCase = getattr(snake_case__ , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**snake_case__ )
_UpperCAmelCase = add_prefix_space
def UpperCamelCase ( self : Union[str, Any] , *snake_case__ : int , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = kwargs.get("is_split_into_words" , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : int , *snake_case__ : Optional[Any] , **snake_case__ : List[str] ):
"""simple docstring"""
_UpperCAmelCase = kwargs.get("is_split_into_words" , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
_UpperCAmelCase = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : Dict , snake_case__ : "Conversation" ):
"""simple docstring"""
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ , add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 133 | 1 |
'''simple docstring'''
import numpy as np
def lowercase_ ( f , ya , xa , h , x_end ):
    # classic fourth-order Runge-Kutta; variable names restored to match the
    # use sites (the obfuscated version reused one name for all parameters
    # and for all four slope estimates)
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
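# Editor's self-check (added; not in the original file): integrating
# dy/dx = y from x = 0 to x = 1 should approximate e at the right endpoint.
if __name__ == "__main__":
    _snake_case = lowercase_(lambda x, y: y , 1.0 , 0.0 , 0.001 , 1.0 )
    assert abs(_snake_case[-1] - 2.718281828459045 ) < 1e-6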
| 368 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class __UpperCAmelCase :
def __init__( self ):
"""simple docstring"""
_snake_case = []
_snake_case = 0
_snake_case = 0
def lowerCamelCase ( self ):
"""simple docstring"""
return self.head == self.tail
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
self.data.append(lowerCAmelCase_ )
_snake_case = self.tail + 1
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.data[self.head]
_snake_case = self.head + 1
return ret
def lowerCamelCase ( self ):
"""simple docstring"""
return self.tail - self.head
def lowerCamelCase ( self ):
"""simple docstring"""
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = data
_snake_case = None
_snake_case = None
_snake_case = 1
def lowerCamelCase ( self ):
"""simple docstring"""
return self.data
def lowerCamelCase ( self ):
"""simple docstring"""
return self.left
def lowerCamelCase ( self ):
"""simple docstring"""
return self.right
def lowerCamelCase ( self ):
"""simple docstring"""
return self.height
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = data
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = node
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = node
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = height
def SCREAMING_SNAKE_CASE__ ( __A ) -> int:
if node is None:
return 0
return node.get_height()
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> int:
if a > b:
return a
return b
def SCREAMING_SNAKE_CASE__ ( __A ) -> MyNode:
print('left rotation node:' , node.get_data() )
_snake_case = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__A )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__A )
_snake_case = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__A )
return ret
def SCREAMING_SNAKE_CASE__ ( __A ) -> MyNode:
print('right rotation node:' , node.get_data() )
_snake_case = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__A )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__A )
_snake_case = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__A )
return ret
def SCREAMING_SNAKE_CASE__ ( __A ) -> MyNode:
_snake_case = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__A ) )
return right_rotation(__A )
def SCREAMING_SNAKE_CASE__ ( __A ) -> MyNode:
_snake_case = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__A ) )
return left_rotation(__A )
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> MyNode | None:
if node is None:
return MyNode(__A )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , __A ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
_snake_case = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
_snake_case = right_rotation(__A )
else:
_snake_case = lr_rotation(__A )
else:
node.set_right(insert_node(node.get_right() , __A ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
_snake_case = node.get_right()
assert right_child is not None
if data < right_child.get_data():
_snake_case = rl_rotation(__A )
else:
_snake_case = left_rotation(__A )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__A )
return node
def SCREAMING_SNAKE_CASE__ ( __A ) -> Any:
while True:
_snake_case = root.get_right()
if right_child is None:
break
_snake_case = right_child
return root.get_data()
def SCREAMING_SNAKE_CASE__ ( __A ) -> Any:
while True:
_snake_case = root.get_left()
if left_child is None:
break
_snake_case = left_child
return root.get_data()
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> MyNode | None:
_snake_case = root.get_left()
_snake_case = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
_snake_case = get_left_most(__A )
root.set_data(__A )
root.set_right(del_node(__A , __A ) )
elif left_child is not None:
_snake_case = left_child
elif right_child is not None:
_snake_case = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('No such data' )
return root
else:
root.set_left(del_node(__A , __A ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__A , __A ) )
if get_height(__A ) - get_height(__A ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
_snake_case = left_rotation(__A )
else:
_snake_case = rl_rotation(__A )
elif get_height(__A ) - get_height(__A ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
_snake_case = right_rotation(__A )
else:
_snake_case = lr_rotation(__A )
_snake_case = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(__A )
return root
class __UpperCAmelCase :
def __init__( self ):
"""simple docstring"""
_snake_case = None
def lowerCamelCase ( self ):
"""simple docstring"""
return get_height(self.root )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
print('insert:' + str(lowerCAmelCase_ ) )
_snake_case = insert_node(self.root , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
print('delete:' + str(lowerCAmelCase_ ) )
if self.root is None:
print('Tree is empty!' )
return
_snake_case = del_node(self.root , lowerCAmelCase_ )
    def __str__( self , ):  # a level-order traversal gives a more intuitive look at the tree
"""simple docstring"""
_snake_case = ''
_snake_case = MyQueue()
q.push(self.root )
_snake_case = self.get_height()
if layer == 0:
return output
_snake_case = 0
while not q.is_empty():
_snake_case = q.pop()
_snake_case = ' ' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase_ )
q.push(lowerCAmelCase_ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
_snake_case = cnt + 1
for i in range(1_00 ):
if cnt == math.pow(2 , lowerCAmelCase_ ) - 1:
_snake_case = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def SCREAMING_SNAKE_CASE__ ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
lowercase : List[Any] = AVLtree()
lowercase : Dict = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 160 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = R"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class UpperCAmelCase ( A_ ):
@add_start_docstrings(snake_case__ )
def __call__(self : Union[str, Any] , snake_case__ : torch.LongTensor , snake_case__ : torch.FloatTensor , **snake_case__ : Optional[int] ) -> bool:
'''simple docstring'''
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class UpperCAmelCase ( A_ ):
def __init__(self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[int] = None ) -> Any:
'''simple docstring'''
snake_case : List[str] = max_length
snake_case : Dict = max_position_embeddings
@add_start_docstrings(snake_case__ )
def __call__(self : str , snake_case__ : torch.LongTensor , snake_case__ : torch.FloatTensor , **snake_case__ : int ) -> bool:
'''simple docstring'''
snake_case : Optional[Any] = input_ids.shape[-1]
snake_case : Union[str, Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , snake_case__ : int , snake_case__ : int ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , snake_case__ , )
snake_case : Union[str, Any] = start_length
snake_case : Union[str, Any] = max_new_tokens
snake_case : Union[str, Any] = start_length + max_new_tokens
@add_start_docstrings(snake_case__ )
def __call__(self : Optional[Any] , snake_case__ : torch.LongTensor , snake_case__ : torch.FloatTensor , **snake_case__ : Any ) -> bool:
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class UpperCAmelCase ( A_ ):
def __init__(self : List[str] , snake_case__ : float , snake_case__ : Optional[float] = None ) -> Any:
'''simple docstring'''
snake_case : Tuple = max_time
snake_case : List[str] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(snake_case__ )
def __call__(self : Dict , snake_case__ : torch.LongTensor , snake_case__ : torch.FloatTensor , **snake_case__ : str ) -> bool:
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class UpperCAmelCase ( A_ ):
@add_start_docstrings(snake_case__ )
def __call__(self : Any , snake_case__ : torch.LongTensor , snake_case__ : torch.FloatTensor , **snake_case__ : List[str] ) -> bool:
'''simple docstring'''
return any(criteria(snake_case__ , snake_case__ ) for criteria in self )
@property
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
for stopping_criterium in self:
if isinstance(snake_case__ , snake_case__ ):
return stopping_criterium.max_length
elif isinstance(snake_case__ , snake_case__ ):
return stopping_criterium.max_length
return None
def UpperCamelCase ( __lowerCamelCase : StoppingCriteriaList , __lowerCamelCase : int ):
snake_case : Any = stopping_criteria.max_length
snake_case : Tuple = deepcopy(__lowerCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , __lowerCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__lowerCamelCase ) )
return new_stopping_criteria
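# Editor's sketch (added), using the public transformers names rather than the
# obfuscated ones above: criteria are composed in a list and polled together.
#
#   from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#   done = criteria(input_ids, scores)  # True as soon as any criterion fires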
| 59 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def UpperCamelCase ( __lowerCamelCase : Dataset , __lowerCamelCase : Dict[str, str] ):
snake_case : int = args.log_outputs
snake_case : Dict = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
snake_case : List[str] = load_metric("wer" )
snake_case : Tuple = load_metric("cer" )
# compute metrics
snake_case : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
snake_case : int = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
snake_case : int = f"""WER: {wer_result}\nCER: {cer_result}"""
print(__lowerCamelCase )
with open(f"""{dataset_id}_eval_results.txt""" , "w" ) as f:
f.write(__lowerCamelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
snake_case : int = f"""log_{dataset_id}_predictions.txt"""
snake_case : List[Any] = f"""log_{dataset_id}_targets.txt"""
with open(__lowerCamelCase , "w" ) as p, open(__lowerCamelCase , "w" ) as t:
# mapping function to write output
def write_to_file(__lowerCamelCase : str , __lowerCamelCase : Optional[int] ):
p.write(f"""{i}""" + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(f"""{i}""" + "\n" )
t.write(batch["target"] + "\n" )
result.map(__lowerCamelCase , with_indices=__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
snake_case : List[Any] = re.sub(__lowerCamelCase , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
    snake_case : Optional[Any] = ["\n\n", "\n", "   ", "  "]
for t in token_sequences_to_ignore:
snake_case : Dict = " ".join(text.split(__lowerCamelCase ) )
return text
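# Editor's example (added) of the normalizer above, on a hypothetical input:
#   "Hello, World!\n\nOK"  ->  "hello world ok"
# (lowercase, strip the listed punctuation, collapse newlines and extra spaces)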
def UpperCamelCase ( __lowerCamelCase : int ):
# load dataset
snake_case : str = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__lowerCamelCase )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
snake_case : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
snake_case : Union[str, Any] = feature_extractor.sampling_rate
# resample audio
snake_case : Union[str, Any] = dataset.cast_column("audio" , Audio(sampling_rate=__lowerCamelCase ) )
# load eval pipeline
if args.device is None:
snake_case : List[str] = 0 if torch.cuda.is_available() else -1
snake_case : str = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__lowerCamelCase : int ):
snake_case : Dict = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
snake_case : str = prediction["text"]
snake_case : Tuple = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
snake_case : Dict = dataset.map(__lowerCamelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase = parser.parse_args()
main(args)
| 59 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_A : int = '__DUMMY_TRANSFORMERS_USER__'
_A : List[Any] = 'Dummy User'
_A : Optional[Any] = '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'
_A : Optional[Any] = 'https://hub-ci.huggingface.co'
_A : Optional[Any] = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
_A : Union[str, Any] = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
_A : List[str] = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def _a ( UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , UpperCAmelCase )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Tuple:
"""simple docstring"""
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Any:
"""simple docstring"""
HfFolder.save_token(UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def _a ( ) -> Dict:
"""simple docstring"""
return HfApi(endpoint=UpperCAmelCase )
@pytest.fixture(scope='''session''' )
def _a ( UpperCAmelCase ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : int = HfFolder.get_token()
HfFolder.save_token(UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Tuple:
"""simple docstring"""
def _cleanup_repo(UpperCAmelCase ):
hf_api.delete_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def _a ( UpperCAmelCase ) -> Dict:
"""simple docstring"""
@contextmanager
def _temporary_repo(UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : List[Any] = f"repo_txt_data-{int(time.time() * 1_0E3 )}"
lowerCamelCase__ : Tuple = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' , private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase , path_or_fileobj=str(UpperCAmelCase ) , path_in_repo='''data/text_data.txt''' , repo_id=UpperCAmelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Any = f"repo_zipped_txt_data-{int(time.time() * 1_0E3 )}"
lowerCamelCase__ : Tuple = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' , private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase , path_or_fileobj=str(UpperCAmelCase ) , path_in_repo='''data.zip''' , repo_id=UpperCAmelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Dict = f"repo_zipped_img_data-{int(time.time() * 1_0E3 )}"
lowerCamelCase__ : int = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' , private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase , path_or_fileobj=str(UpperCAmelCase ) , path_in_repo='''data.zip''' , repo_id=UpperCAmelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 353 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any = logging.get_logger(__name__)
_A : int = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Dict = "timesformer"
def __init__( self : List[str] , A : int=2_2_4 , A : Optional[Any]=1_6 , A : str=3 , A : str=8 , A : Any=7_6_8 , A : Dict=1_2 , A : Optional[Any]=1_2 , A : Any=3_0_7_2 , A : str="gelu" , A : Optional[int]=0.0 , A : Union[str, Any]=0.0 , A : List[Any]=0.02 , A : int=1e-6 , A : Tuple=True , A : Any="divided_space_time" , A : Optional[Any]=0 , **A : Tuple , ) ->str:
super().__init__(**A )
lowerCamelCase__ : Optional[Any] = image_size
lowerCamelCase__ : int = patch_size
lowerCamelCase__ : Any = num_channels
lowerCamelCase__ : Optional[int] = num_frames
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Dict = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : str = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = qkv_bias
lowerCamelCase__ : str = attention_type
lowerCamelCase__ : List[str] = drop_path_rate
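# Editor's sketch (added): minimal instantiation via the public class name;
# every value shown is simply one of the defaults defined above.
#
#   from transformers import TimesformerConfig
#   config = TimesformerConfig(num_frames=8, attention_type="divided_space_time")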
| 265 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase ={
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
_lowerCamelCase ={"allegro/herbert-base-cased": 5_14}
_lowerCamelCase ={}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = HerbertTokenizer
def __init__( self : Optional[int] ,snake_case : Any=None ,snake_case : str=None ,snake_case : Optional[Any]=None ,snake_case : List[str]="<s>" ,snake_case : Union[str, Any]="<unk>" ,snake_case : Dict="<pad>" ,snake_case : List[Any]="<mask>" ,snake_case : Any="</s>" ,**snake_case : Dict ,):
super().__init__(
snake_case ,snake_case ,tokenizer_file=snake_case ,cls_token=snake_case ,unk_token=snake_case ,pad_token=snake_case ,mask_token=snake_case ,sep_token=snake_case ,**snake_case ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE =[self.cls_token_id]
SCREAMING_SNAKE_CASE =[self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCAmelCase ( self : List[Any] ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ,snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case ,token_ids_a=snake_case ,already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
def _lowerCAmelCase ( self : str ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE =[self.sep_token_id]
SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCAmelCase ( self : int ,snake_case : str ,snake_case : Optional[str] = None ):
SCREAMING_SNAKE_CASE =self._tokenizer.model.save(snake_case ,name=snake_case )
return tuple(snake_case )
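# Editor's note (added): the layouts produced by the methods above are
#   single sequence:  <s> A </s>          -> token_type_ids all 0
#   sequence pair:    <s> A </s> B </s>   -> 0s over "<s> A </s>", 1s over "B </s>"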
| 334 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE =Trie()
SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case ,['AB', 'C'] )
| 334 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCAmelCase : Any = logging.getLogger(__name__)
class lowerCAmelCase__ ( __lowercase ):
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=-1 ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL chunking data the chunk column is second to last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
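

if __name__ == "__main__":
    # Illustrative only: a minimal rehearsal of the CoNLL line protocol the NER task
    # above relies on (one "token label" pair per line, blank lines between
    # sentences). `_demo_parse_conll` is a hypothetical helper, not part of utils_ner.
    def _demo_parse_conll(text):
        sentences, words, labels = [], [], []
        for line in text.splitlines():
            if not line.strip() or line.startswith("-DOCSTART-"):
                if words:
                    sentences.append((words, labels))
                    words, labels = [], []
            else:
                splits = line.split(" ")
                words.append(splits[0])
                labels.append(splits[-1].strip())
        if words:
            sentences.append((words, labels))
        return sentences

    assert _demo_parse_conll("EU B-ORG\nrejects O\n\nGerman B-MISC\n") == [
        (["EU", "rejects"], ["B-ORG", "O"]),
        (["German"], ["B-MISC"]),
    ]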
| 361 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
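

if __name__ == "__main__":
    # Usage sketch (illustrative only; this module uses relative imports, so in
    # practice it is imported through the transformers package): PretrainedConfig
    # subclasses are plain attribute containers, and constructor overrides survive
    # a to_dict() round trip.
    config = RetriBertConfig(projection_dim=64)
    assert config.to_dict()["projection_dim"] == 64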
| 331 | 0 |
'''simple docstring'''
def is_even(number: int) -> bool:
    """
    Return True when ``number`` is even, by checking its lowest bit.

    >>> is_even(1)
    False
    >>> is_even(4)
    True
    >>> is_even(0)
    True
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    """Decorator marking ``fn`` as experimental: each call emits a ``UserWarning``."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
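

if __name__ == "__main__":
    # Usage sketch (illustrative only): each call through the decorated function
    # emits a UserWarning and then delegates to the wrapped implementation.
    @experimental
    def ping() -> str:
        return "pong"

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert ping() == "pong"
    assert any("experimental" in str(w.message) for w in caught)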
| 85 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to fp16, writing to ``save_path`` (or back to ``src_path``)."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
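
# Typical invocation via python-fire, with placeholder paths (illustrative only;
# the script filename is an assumption):
#   python convert_to_fp16.py /path/to/pytorch_model.bin --save_path /path/to/pytorch_model.fp16.bin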
| 352 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 262 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 103 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 160 | 0 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
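    # A tiny illustration (hypothetical docstring text) of what `_re_checkpoint`
    # extracts: the markdown link name and its hub URL.
    demo = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert _re_checkpoint.findall(demo) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]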
| 360 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 345 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 133 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
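    # Sanity check, assuming the reference values stated in Project Euler 115:
    # for a minimum block length of 3 the fill count first exceeds one million at n = 30.
    assert solution(min_block_length=3) == 30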
| 265 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
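    # Sanity check, assuming the published Project Euler 33 result: the product of
    # the four non-trivial digit-cancelling fractions reduces to a denominator of 100.
    assert solution() == 100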
| 361 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 95 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 47 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 331 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of the key.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
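    # One extra illustrative call beyond the doctests: the key 4 first
    # matches at index 2 of the list below.
    print(search([1, 2, 4, 5, 3], 4))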
| 354 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 325 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Bi-directional Dijkstra's algorithm. Returns the length of the shortest
    path, or -1 if the destination is not reachable.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
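    # Illustrative run on the module-level graphs above; the shortest
    # E -> F route is E -> G -> F with total cost 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))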
| 39 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 262 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # This __init__ exists for legacy argument handling: deprecated
        # `no_*` flags are translated into their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
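# Minimal usage sketch (kept commented out because this module is meant to be
# imported; the flag values below are illustrative):
# args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
# print(args.device, args.n_gpu)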
| 211 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
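# Usage sketch (checkpoint name illustrative): the generated auto classes are
# used like their PyTorch counterparts, e.g.:
# model = FlaxAutoModelForMaskedLM.from_pretrained("roberta-base")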
| 211 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
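# Usage sketch (model objects illustrative): given a PyTorch module and an
# instantiated Flax model of the same architecture, this yields a nested
# Flax params dict:
# flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)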
| 10 |
from ..utils import DummyObject, requires_backends
# NOTE: the concrete class names in this dummy-objects module are not
# recoverable from the source; the placeholder names below are illustrative.
# Each class exists only to raise a helpful error when `flax` is missing.
class FlaxDummyObject1(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject2(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject3(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject4(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject5(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject6(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject7(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject8(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject9(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject10(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject11(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject12(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject13(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 345 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
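# Usage sketch: configurations are plain constructors; e.g. a width-reduced
# variant (the multiplier value is illustrative):
# config = MobileNetV2Config(depth_multiplier=0.75)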
| 355 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 102 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple:
    """Split a matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # Quirk preserved from the source: two square inputs are returned as-is.
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
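    # A second, smaller example (values illustrative): a 1x2 times 2x2
    # product, which exercises the zero-padding path. Expected: [[13, 16]].
    print(strassen([[1, 2]], [[3, 4], [5, 6]]))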
| 98 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class OnnxRuntimePlaceholderTest:  # placeholder name; the original is not recoverable
    pass
| 95 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 361 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE__ ) as metadata_file:
__lowerCamelCase : List[str] = json.load(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : int = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__lowerCamelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
# Load the entity vocab file
__lowerCamelCase : List[Any] = load_entity_vocab(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__lowerCamelCase : str = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Initialize the embeddings of the special tokens
__lowerCamelCase : Union[str, Any] = state_dict['embeddings.word_embeddings.weight']
__lowerCamelCase : Tuple = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
__lowerCamelCase : Any = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
__lowerCamelCase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowerCamelCase : Optional[int] = f'encoder.layer.{layer_index}.attention.self.'
__lowerCamelCase : Dict = state_dict[prefix + matrix_name]
__lowerCamelCase : List[Any] = state_dict[prefix + matrix_name]
__lowerCamelCase : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowerCamelCase : Optional[int] = state_dict['entity_embeddings.entity_embeddings.weight']
__lowerCamelCase : Union[str, Any] = entity_emb[entity_vocab['[MASK]']]
__lowerCamelCase : Optional[Any] = LukeModel(config=SCREAMING_SNAKE_CASE__ ).eval()
__lowerCamelCase , __lowerCamelCase : List[Any] = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
if not (len(SCREAMING_SNAKE_CASE__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'Missing keys {", ".join(SCREAMING_SNAKE_CASE__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
__lowerCamelCase : Optional[Any] = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task='entity_classification' )
__lowerCamelCase : Dict = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
__lowerCamelCase : Union[str, Any] = (39, 42)
__lowerCamelCase : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , add_prefix_space=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
__lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE__ )
# Verify word hidden states
if model_size == "large":
__lowerCamelCase : Dict = torch.Size((1, 42, 1_024) )
__lowerCamelCase : int = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
__lowerCamelCase : Union[str, Any] = torch.Size((1, 42, 768) )
__lowerCamelCase : Tuple = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__lowerCamelCase : Union[str, Any] = torch.Size((1, 1, 1_024) )
__lowerCamelCase : Dict = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
__lowerCamelCase : int = torch.Size((1, 1, 768) )
__lowerCamelCase : Dict = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE__ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = {}
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase , __lowerCamelCase : List[Any] = line.rstrip().split('\t' )
__lowerCamelCase : Any = index
return entity_vocab
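# Illustrative sketch of the expected TSV layout (the entries are hypothetical,
# not taken from a real checkpoint): one entity title per line, tab-separated
# from a second column that this loader ignores, e.g.
#
#     [PAD]\t0
#     [UNK]\t0
#     [MASK]\t0
#
# which would yield {"[PAD]": 0, "[UNK]": 1, "[MASK]": 2} -- each title is
# mapped to its line index.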
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
lowercase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 194 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : str = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class a__ ( A__ ):
A = 'git_vision_model'
def __init__( self : Any,_A : str=768,_A : Union[str, Any]=3072,_A : List[Any]=12,_A : Dict=12,_A : List[Any]=3,_A : Union[str, Any]=224,_A : Any=16,_A : Union[str, Any]="quick_gelu",_A : List[str]=1E-5,_A : str=0.0,_A : Tuple=0.02,**_A : Tuple,):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE_ : List[str] = patch_size
SCREAMING_SNAKE_CASE_ : Tuple = image_size
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE_ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE_ : str = hidden_act
@classmethod
def __UpperCamelCase ( cls : int,_A : Union[str, os.PathLike],**_A : str ):
"""simple docstring"""
cls._set_token_in_kwargs(_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = cls.get_config_dict(_A,**_A )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
SCREAMING_SNAKE_CASE_ : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls,"model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A,**_A )
class a__ ( A__ ):
A = 'git'
def __init__( self : Dict,_A : Optional[Any]=None,_A : int=3_0522,_A : Dict=768,_A : str=6,_A : str=12,_A : Optional[int]=3072,_A : List[str]="gelu",_A : Tuple=0.1,_A : Optional[int]=0.1,_A : Any=1024,_A : Optional[Any]=0.02,_A : Optional[int]=1E-12,_A : Any=0,_A : List[str]="absolute",_A : List[str]=True,_A : List[str]=False,_A : Optional[int]=101,_A : Optional[int]=102,_A : List[str]=None,**_A : str,):
"""simple docstring"""
super().__init__(bos_token_id=_A,eos_token_id=_A,pad_token_id=_A,**_A )
if vision_config is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
SCREAMING_SNAKE_CASE_ : List[str] = GitVisionConfig(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : str = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : str = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = position_embedding_type
SCREAMING_SNAKE_CASE_ : Tuple = use_cache
SCREAMING_SNAKE_CASE_ : List[Any] = tie_word_embeddings
SCREAMING_SNAKE_CASE_ : Any = num_image_with_embedding
SCREAMING_SNAKE_CASE_ : List[str] = bos_token_id
SCREAMING_SNAKE_CASE_ : Dict = eos_token_id
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.__class__.model_type
return output
| 18 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
__lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
__lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
            __lowercase = re.findall(r'\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int:
def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase = []
for key in import_dict_objects.keys():
__lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
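# Example, under the checks above: with
#     import_dict_objects = {"none": ["AutoModel", "AutoModel"]}
#     type_hint_objects   = {"none": ["AutoModel"]}
# the returned errors report the duplicate _import_structure definition; an
# object present on only one side would instead produce a "Differences for
# base imports:" entry.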
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__lowercase = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )
__lowercase = parse_init(SCREAMING_SNAKE_CASE )
if objects is not None:
__lowercase = analyze_results(*SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
__lowercase = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE )
return submodules
SCREAMING_SNAKE_CASE__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = importlib.util.spec_from_file_location(
'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase = spec.loader.load_module()
__lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
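# A quick usage sketch of the lazy pattern above: the heavy submodules are only
# imported on first attribute access, so
#
#     from transformers.models.efficientformer import EfficientFormerConfig
#
# resolves through _LazyModule's __getattr__ and triggers the real import of
# configuration_efficientformer at that point, not at package import time.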
| 175 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCAmelCase__ = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def snake_case_ ( A_ : Any ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def snake_case_ ( A_ : Dict, A_ : Any ):
'''simple docstring'''
if args.student_type == "roberta":
_lowerCamelCase : List[str] = False
elif args.student_type == "gpt2":
_lowerCamelCase : Any = False
def snake_case_ ( A_ : Optional[Any], A_ : List[Any] ):
'''simple docstring'''
if args.student_type == "roberta":
_lowerCamelCase : Optional[int] = False
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''', action='''store_true''', help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''', type=A_, required=A_, help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''', type=A_, required=A_, help='''The binarized file (tokenized + tokens_to_ids), grouped by sequence.''', )
parser.add_argument(
'''--student_type''', type=A_, choices=['''distilbert''', '''roberta''', '''gpt2'''], required=A_, help='''The student type (DistilBERT, RoBERTa).''', )
parser.add_argument('''--student_config''', type=A_, required=A_, help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''', default=A_, type=A_, help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''', choices=['''bert''', '''roberta''', '''gpt2'''], required=A_, help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''', type=A_, required=A_, help='''The teacher model.''' )
    parser.add_argument('''--temperature''', default=2.0, type=A_, help='''Temperature applied to the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''', default=0.5, type=A_, help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''', default=0.0, type=A_, help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''', )
parser.add_argument('''--alpha_clm''', default=0.5, type=A_, help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''', default=0.0, type=A_, help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''', default=0.0, type=A_, help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
        '''--mlm''', action='''store_true''', help='''The LM step: MLM or CLM. If `mlm` is set, MLM is used instead of CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''', default=0.15, type=A_, help='''Proportion of tokens for which we need to make a prediction.''', )
parser.add_argument('''--word_mask''', default=0.8, type=A_, help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''', default=0.1, type=A_, help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''', default=0.1, type=A_, help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
        '''--mlm_smoothing''', default=0.7, type=A_, help='''Smoothing parameter to emphasize rarer tokens (see XLM, similar to word2vec).''', )
parser.add_argument('''--token_counts''', type=A_, help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''', action='''store_true''', help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''', )
parser.add_argument(
'''--freeze_pos_embs''', action='''store_true''', help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''', )
parser.add_argument(
        '''--freeze_token_type_embds''', action='''store_true''', help='''Freeze token type embeddings during distillation if they exist. For student_type in [\'roberta\'] only.''', )
parser.add_argument('''--n_epoch''', type=A_, default=3, help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''', type=A_, default=5, help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''', action='''store_false''', help='''If true, group sequences that have similar length into the same batch. Default is true.''', )
parser.add_argument(
'''--gradient_accumulation_steps''', type=A_, default=50, help='''Gradient accumulation for larger training batches.''', )
parser.add_argument('''--warmup_prop''', default=0.05, type=A_, help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''', default=0.0, type=A_, help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''', default=5E-4, type=A_, help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''', default=1E-6, type=A_, help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''', default=5.0, type=A_, help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''', default=0.02, type=A_, help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''', )
parser.add_argument(
'''--fp16_opt_level''', type=A_, default='''O1''', help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
), )
parser.add_argument('''--n_gpu''', type=A_, default=1, help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''', type=A_, default=-1, help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''', type=A_, default=56, help='''Random seed''' )
parser.add_argument('''--log_interval''', type=A_, default=5_00, help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''', type=A_, default=40_00, help='''Checkpoint interval.''' )
_lowerCamelCase : List[Any] = parser.parse_args()
sanity_checks(A_ )
# ARGS #
init_gpu_params(A_ )
set_seed(A_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path, '''parameters.json''' ), '''w''' ) as f:
json.dump(vars(A_ ), A_, indent=4 )
git_log(args.dump_path )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = MODEL_CLASSES[args.student_type]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowerCamelCase : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowerCamelCase : List[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowerCamelCase : Optional[int] = tokenizer.all_special_tokens.index(A_ )
_lowerCamelCase : Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
_lowerCamelCase : Optional[Any] = special_tok_ids
_lowerCamelCase : str = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file, '''rb''' ) as fp:
_lowerCamelCase : Any = pickle.load(A_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts, '''rb''' ) as fp:
_lowerCamelCase : str = pickle.load(A_ )
_lowerCamelCase : List[Any] = np.maximum(A_, 1 ) ** -args.mlm_smoothing
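        # With the default mlm_smoothing of 0.7, a token seen 100 times gets weight
        # 100 ** -0.7 ~= 0.04 while a token seen 10 times gets ~= 0.20, so rarer
        # tokens are selected for masking proportionally more often (the XLM-style
        # frequency smoothing described in the --mlm_smoothing help text).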
for idx in special_tok_ids.values():
_lowerCamelCase : List[Any] = 0.0 # do not predict special tokens
_lowerCamelCase : str = torch.from_numpy(A_ )
else:
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Any = LmSeqsDataset(params=A_, data=A_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
_lowerCamelCase : str = student_config_class.from_pretrained(args.student_config )
_lowerCamelCase : Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
_lowerCamelCase : Dict = student_model_class.from_pretrained(args.student_pretrained_weights, config=A_ )
else:
_lowerCamelCase : Optional[Any] = student_model_class(A_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
_lowerCamelCase : int = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=A_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A_, A_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A_, A_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowerCamelCase : Optional[int] = Distiller(
params=A_, dataset=A_, token_probs=A_, student=A_, teacher=A_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 175 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowercase_ = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def lowerCAmelCase (__A , __A , __A , __A=None):
"""simple docstring"""
_a = XLNetConfig.from_json_file(__A)
_a = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
_a = finetuning_task
_a = GLUE_TASKS_NUM_LABELS[finetuning_task]
_a = XLNetForSequenceClassification(__A)
elif "squad" in finetuning_task:
_a = finetuning_task
_a = XLNetForQuestionAnswering(__A)
else:
_a = XLNetLMHeadModel(__A)
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__A , __A , __A)
# Save pytorch-model
_a = os.path.join(__A , __A)
_a = os.path.join(__A , __A)
print(F'''Save PyTorch model to {os.path.abspath(__A)}''')
torch.save(model.state_dict() , __A)
print(F'''Save configuration file to {os.path.abspath(__A)}''')
with open(__A , '''w''' , encoding='''utf-8''') as f:
f.write(config.to_json_string())
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowercase_ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 211 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : int
def lowerCAmelCase (__A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter s type must be str.''')
return [s[i:] + s[:i] for i in range(len(__A))]
def lowerCAmelCase (__A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter s type must be str.''')
if not s:
raise ValueError('''The parameter s must not be empty.''')
_a = all_rotations(__A)
    rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
_a = {
"bwt_string": "".join([word[-1] for word in rotations]),
"idx_original_string": rotations.index(__A),
}
return response
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter bwt_string type must be str.''')
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''')
try:
_a = int(__A)
except ValueError:
raise TypeError(
            '''The parameter idx_original_string type must be int or'''
            ''' castable to int.''')
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''')
if idx_original_string >= len(__A):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''')
_a = [''''''] * len(__A)
for _ in range(len(__A)):
for i in range(len(__A)):
_a = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
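# A quick worked example of the round trip above: for s = "banana" the sorted
# rotations are ["abanan", "anaban", "ananab", "banana", "nabana", "nanaba"],
# so bwt_transform("banana") returns {"bwt_string": "nnbaaa",
# "idx_original_string": 3}, and reverse_bwt("nnbaaa", 3) recovers "banana".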
if __name__ == "__main__":
lowercase_ = "Provide a string that I will generate its BWT transform: "
lowercase_ = input(entry_msg).strip()
lowercase_ = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
lowercase_ = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
| 211 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE( A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = BioGptTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = False
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__SCREAMING_SNAKE_CASE :int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__SCREAMING_SNAKE_CASE :str = dict(zip(SCREAMING_SNAKE_CASE__ ,range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__SCREAMING_SNAKE_CASE :str = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
__SCREAMING_SNAKE_CASE :Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE :str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )
with open(self.merges_file ,'''w''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = '''lower newer'''
__SCREAMING_SNAKE_CASE :Tuple = '''lower newer'''
return input_text, output_text
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = BioGptTokenizer(self.vocab_file ,self.merges_file )
__SCREAMING_SNAKE_CASE :List[str] = '''lower'''
__SCREAMING_SNAKE_CASE :Tuple = ['''low''', '''er</w>''']
__SCREAMING_SNAKE_CASE :List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = tokens + ['''<unk>''']
__SCREAMING_SNAKE_CASE :str = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__SCREAMING_SNAKE_CASE :Any = tokenizer.encode('''sequence builders''' ,add_special_tokens=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 239 |
"""simple docstring"""
from __future__ import annotations
import math
def __lowerCamelCase ( a_ : int , a_ : int , a_ : bool , a_ : list[int] , a_ : float ) -> int:
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(a_ ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , a_ , a_ , a_ ) , minimax(depth + 1 , node_index * 2 + 1 , a_ , a_ , a_ ) , )
return min(
minimax(depth + 1 , node_index * 2 , a_ , a_ , a_ ) , minimax(depth + 1 , node_index * 2 + 1 , a_ , a_ , a_ ) , )
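# A quick worked example: for scores [3, 5, 2, 9] and height 2, the maximizer
# takes the better of the two minimizer subtrees, max(min(3, 5), min(2, 9))
# = max(3, 2) = 3, so minimax(0, 0, True, [3, 5, 2, 9], 2) returns 3.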
def __lowerCamelCase ( ) -> None:
__SCREAMING_SNAKE_CASE :Dict = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
__SCREAMING_SNAKE_CASE :Optional[int] = math.log(len(a_ ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , a_ , a_ , a_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 239 | 1 |
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowercase_ ( _snake_case ,_snake_case ):
assert isinstance(_snake_case ,_snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ : Optional[int] = SqlDatasetReader(
"""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_snake_case ,keep_in_memory=_snake_case ).read()
_check_sql_dataset(_snake_case ,_snake_case )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] ,)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Any = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ : Dict = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ : Any = (
Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : int = SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,features=_snake_case ,cache_dir=_snake_case ).read()
_check_sql_dataset(_snake_case ,_snake_case )
def lowercase_ ( _snake_case ):
with contextlib.closing(sqlitea.connect(_snake_case ) ) as con:
SCREAMING_SNAKE_CASE__ : Optional[int] = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ : Any = os.path.join(_snake_case ,"""tmp.sql""" )
SCREAMING_SNAKE_CASE__ : Dict = SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_snake_case ).read()
SqlDatasetWriter(_snake_case ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=1 ).write()
SCREAMING_SNAKE_CASE__ : List[str] = iter_sql_file(_snake_case )
SCREAMING_SNAKE_CASE__ : int = iter_sql_file(_snake_case )
for rowa, rowa in zip(_snake_case ,_snake_case ):
assert rowa == rowa
@require_sqlalchemy
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(_snake_case ,"""tmp.sql""" )
SCREAMING_SNAKE_CASE__ : int = SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_snake_case ).read()
SqlDatasetWriter(_snake_case ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=2 ).write()
SCREAMING_SNAKE_CASE__ : Any = iter_sql_file(_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = iter_sql_file(_snake_case )
for rowa, rowa in zip(_snake_case ,_snake_case ):
assert rowa == rowa
@require_sqlalchemy
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(_snake_case ,"""tmp.sql""" )
SCREAMING_SNAKE_CASE__ : Any = SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_snake_case ).read()
with pytest.raises(_snake_case ):
SqlDatasetWriter(_snake_case ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=0 ).write()
| 25 |
"""simple docstring"""
import math
def lowercase ( _snake_case : int ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( _snake_case : float = 0.1 ) ->int:
    """simple docstring"""
    # Project Euler 58: walk the diagonals of an Ulam spiral and return the first
    # side length at which the fraction of primes on the diagonals drops below
    # `ratio`. A side of length 3 already contributes the primes 3, 5 and 7.
    __snake_case : Tuple = 3
    __snake_case : Any = 3
    while primes / (2 * j - 1) >= ratio:
        # The corners of the next ring (side length j + 2) are
        # (j + 2) ** 2 - k * (j + 1) for k = 1, 2, 3; the fourth corner
        # (j + 2) ** 2 is a perfect square and never prime, so the range skips it.
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(_snake_case )
        j += 2
    return j
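# Worked check (Project Euler 58): with the default ratio of 0.1 the prime
# fraction on the spiral diagonals should first drop below 10% at side length
# 26241, the classic answer to that problem.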
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__snake_case : List[str] = logging.get_logger(__name__)
# General docstring
__snake_case : int = 'RegNetConfig'
# Base docstring
__snake_case : Dict = 'facebook/regnet-y-040'
__snake_case : List[str] = [1, 1_088, 7, 7]
# Image classification docstring
__snake_case : List[str] = 'facebook/regnet-y-040'
__snake_case : Tuple = 'tabby, tabby cat'
__snake_case : Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int = 3 , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: Optional[str] = "relu" , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
__lowerCAmelCase : List[str] = tf.keras.layers.ConvaD(
filters=_SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , strides=_SCREAMING_SNAKE_CASE , padding="VALID" , groups=_SCREAMING_SNAKE_CASE , use_bias=_SCREAMING_SNAKE_CASE , name="convolution" , )
__lowerCAmelCase : Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization")
__lowerCAmelCase : Tuple = ACTaFN[activation] if activation is not None else tf.identity
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: List[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : str = self.convolution(self.padding(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : Tuple = self.normalization(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = self.activation(_SCREAMING_SNAKE_CASE)
return hidden_state
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: RegNetConfig , **_SCREAMING_SNAKE_CASE: List[str]) -> str:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = config.num_channels
__lowerCAmelCase : List[Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = shape_list(_SCREAMING_SNAKE_CASE)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__lowerCAmelCase : Union[str, Any] = tf.transpose(_SCREAMING_SNAKE_CASE , perm=(0, 2, 3, 1))
__lowerCAmelCase : Union[str, Any] = self.embedder(_SCREAMING_SNAKE_CASE)
return hidden_state
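# Shape trace for the embedder above, assuming the standard 224x224 RegNet
# input: pixel_values of shape (batch, 3, 224, 224) are transposed to NHWC
# (batch, 224, 224, 3), then the stride-2, kernel-3 convolution with zero
# padding of 1 halves the spatial dims, giving (batch, 112, 112,
# config.embedding_size).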
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int = 2 , **_SCREAMING_SNAKE_CASE: int) -> Any:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = tf.keras.layers.ConvaD(
filters=_SCREAMING_SNAKE_CASE , kernel_size=1 , strides=_SCREAMING_SNAKE_CASE , use_bias=_SCREAMING_SNAKE_CASE , name="convolution")
__lowerCAmelCase : List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization")
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: tf.Tensor , _SCREAMING_SNAKE_CASE: bool = False) -> tf.Tensor:
"""simple docstring"""
return self.normalization(self.convolution(_SCREAMING_SNAKE_CASE) , training=_SCREAMING_SNAKE_CASE)
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_SCREAMING_SNAKE_CASE , name="pooler")
__lowerCAmelCase : Dict = [
tf.keras.layers.ConvaD(filters=_SCREAMING_SNAKE_CASE , kernel_size=1 , activation="relu" , name="attention.0"),
tf.keras.layers.ConvaD(filters=_SCREAMING_SNAKE_CASE , kernel_size=1 , activation="sigmoid" , name="attention.2"),
]
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: str) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.pooler(_SCREAMING_SNAKE_CASE)
for layer_module in self.attention:
__lowerCAmelCase : Union[str, Any] = layer_module(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = hidden_state * pooled
return hidden_state
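# The layer above is a squeeze-and-excitation block: global average pooling
# squeezes each channel to a scalar, two 1x1 convolutions (ReLU then sigmoid)
# produce per-channel gates, and the input feature map is rescaled by them.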
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: RegNetConfig , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int = 1 , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> int:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1
__lowerCAmelCase : Any = max(1 , out_channels // config.groups_width)
__lowerCAmelCase : Union[str, Any] = (
TFRegNetShortCut(_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut")
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__lowerCAmelCase : Tuple = [
TFRegNetConvLayer(_SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act , name="layer.0"),
TFRegNetConvLayer(
_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , groups=_SCREAMING_SNAKE_CASE , activation=config.hidden_act , name="layer.1"),
TFRegNetConvLayer(_SCREAMING_SNAKE_CASE , kernel_size=1 , activation=_SCREAMING_SNAKE_CASE , name="layer.2"),
]
__lowerCAmelCase : Dict = ACTaFN[config.hidden_act]
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : str = hidden_state
for layer_module in self.layers:
__lowerCAmelCase : Dict = layer_module(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.shortcut(_SCREAMING_SNAKE_CASE)
hidden_state += residual
__lowerCAmelCase : Optional[Any] = self.activation(_SCREAMING_SNAKE_CASE)
return hidden_state
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: RegNetConfig , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int = 1 , **_SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = in_channels != out_channels or stride != 1
__lowerCAmelCase : Any = max(1 , out_channels // config.groups_width)
__lowerCAmelCase : Any = (
TFRegNetShortCut(_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut")
)
__lowerCAmelCase : Union[str, Any] = [
TFRegNetConvLayer(_SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act , name="layer.0"),
TFRegNetConvLayer(
_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , groups=_SCREAMING_SNAKE_CASE , activation=config.hidden_act , name="layer.1"),
TFRegNetSELayer(_SCREAMING_SNAKE_CASE , reduced_channels=int(round(in_channels / 4)) , name="layer.2"),
TFRegNetConvLayer(_SCREAMING_SNAKE_CASE , kernel_size=1 , activation=_SCREAMING_SNAKE_CASE , name="layer.3"),
]
__lowerCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = hidden_state
for layer_module in self.layers:
__lowerCAmelCase : Tuple = layer_module(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = self.shortcut(_SCREAMING_SNAKE_CASE)
hidden_state += residual
__lowerCAmelCase : Optional[int] = self.activation(_SCREAMING_SNAKE_CASE)
return hidden_state
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: RegNetConfig , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int = 2 , _SCREAMING_SNAKE_CASE: int = 2 , **_SCREAMING_SNAKE_CASE: Tuple) -> Optional[int]:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__lowerCAmelCase : Union[str, Any] = [
# downsampling is done in the first layer with stride of 2
layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , name="layers.0"),
*[layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , name=F"""layers.{i+1}""") for i in range(depth - 1)],
]
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
for layer_module in self.layers:
__lowerCAmelCase : Optional[int] = layer_module(_SCREAMING_SNAKE_CASE)
return hidden_state
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: RegNetConfig , **_SCREAMING_SNAKE_CASE: List[Any]) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_SCREAMING_SNAKE_CASE , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ))
__lowerCAmelCase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(_SCREAMING_SNAKE_CASE , config.depths[1:])):
self.stages.append(TFRegNetStage(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , depth=_SCREAMING_SNAKE_CASE , name=F"""stages.{i+1}"""))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: tf.Tensor , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = True) -> TFBaseModelOutputWithNoAttention:
"""simple docstring"""
__lowerCAmelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCAmelCase : Tuple = hidden_states + (hidden_state,)
__lowerCAmelCase : Dict = stage_module(_SCREAMING_SNAKE_CASE)
if output_hidden_states:
__lowerCAmelCase : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=_SCREAMING_SNAKE_CASE)
@keras_serializable
class A__ ( tf.keras.layers.Layer ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = RegNetConfig
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = config
__lowerCAmelCase : List[Any] = TFRegNetEmbeddings(_SCREAMING_SNAKE_CASE , name="embedder")
__lowerCAmelCase : Optional[Any] = TFRegNetEncoder(_SCREAMING_SNAKE_CASE , name="encoder")
__lowerCAmelCase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_SCREAMING_SNAKE_CASE , name="pooler")
@unpack_inputs
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: tf.Tensor , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
__lowerCAmelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase : Dict = self.embedder(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = self.encoder(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = encoder_outputs[0]
__lowerCAmelCase : Optional[Any] = self.pooler(_SCREAMING_SNAKE_CASE)
        # Change to NCHW output format to have uniformity in the modules
__lowerCAmelCase : Optional[Any] = tf.transpose(_SCREAMING_SNAKE_CASE , perm=(0, 3, 1, 2))
__lowerCAmelCase : Tuple = tf.transpose(_SCREAMING_SNAKE_CASE , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
            __lowerCAmelCase : Dict = tuple([tf.transpose(h , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , pooler_output=_SCREAMING_SNAKE_CASE , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = RegNetConfig
SCREAMING_SNAKE_CASE = 'regnet'
SCREAMING_SNAKE_CASE = 'pixel_values'
@property
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Any:
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa)}
__snake_case : List[Any] = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__snake_case : Dict = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: RegNetConfig , *_SCREAMING_SNAKE_CASE: Tuple , **_SCREAMING_SNAKE_CASE: List[Any]) -> Any:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = TFRegNetMainLayer(_SCREAMING_SNAKE_CASE , name="regnet")
@unpack_inputs
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: tf.Tensor , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: List[str]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
"""simple docstring"""
__lowerCAmelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase : int = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase : Optional[Any] = self.regnet(
pixel_values=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Any , _SCREAMING_SNAKE_CASE: RegNetConfig , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: int) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = config.num_labels
__lowerCAmelCase : Dict = TFRegNetMainLayer(_SCREAMING_SNAKE_CASE , name="regnet")
# classification head
__lowerCAmelCase : int = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1") if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: tf.Tensor = None , _SCREAMING_SNAKE_CASE: tf.Tensor = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: List[str]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase : Any = self.regnet(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = outputs.pooler_output if return_dict else outputs[1]
__lowerCAmelCase : str = self.classifier[0](_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.classifier[1](_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = None if labels is None else self.hf_compute_loss(labels=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE)
if not return_dict:
__lowerCAmelCase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states)
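# A minimal usage sketch (hypothetical: it assumes the obfuscated classes above correspond to
# the standard `TFRegNetForImageClassification` API in transformers, and "facebook/regnet-y-040"
# is only an illustrative checkpoint name):
#
#     import tensorflow as tf
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")  # `image` is a PIL image
#     predicted_class = int(tf.argmax(model(**inputs).logits, axis=-1)[0])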
| 358 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("google/mt5-small")
__lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="pt").input_ids
__lowerCAmelCase : List[str] = tokenizer("Hi I am" , return_tensors="pt").input_ids
__lowerCAmelCase : List[str] = model(input_ids.to(_SCREAMING_SNAKE_CASE) , labels=labels.to(_SCREAMING_SNAKE_CASE)).loss
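        # `loss` is the mean per-token negative log-likelihood, so multiplying by the label
        # length and negating recovers the total sequence log-likelihood compared below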
__lowerCAmelCase : Optional[int] = -(labels.shape[-1] * loss.item())
__lowerCAmelCase : List[str] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 58 | 0 |
from collections import deque
def _A ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
a__ : str =len(SCREAMING_SNAKE_CASE )
a__ : Tuple =deque()
a__ : List[Any] =[False for _ in range(SCREAMING_SNAKE_CASE )]
a__ : int =[-1 for _ in range(SCREAMING_SNAKE_CASE )]
a__ : List[Any] =index_of[:]
def strong_connect(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
a__ : Optional[int] =index # the number when this node is seen
a__ : Any =index # lowest rank node reachable from here
index += 1
stack.append(SCREAMING_SNAKE_CASE )
a__ : Optional[Any] =True
for w in g[v]:
if index_of[w] == -1:
a__ : Union[str, Any] =strong_connect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ : int =(
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
a__ : Optional[int] =(
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
a__ : Optional[int] =[]
a__ : str =stack.pop()
a__ : Any =False
component.append(SCREAMING_SNAKE_CASE )
while w != v:
a__ : str =stack.pop()
a__ : Tuple =False
component.append(SCREAMING_SNAKE_CASE )
components.append(SCREAMING_SNAKE_CASE )
return index
a__ : Optional[Any] =[]
for v in range(SCREAMING_SNAKE_CASE ):
if index_of[v] == -1:
strong_connect(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
return components
def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
a__ : Any =[[] for _ in range(SCREAMING_SNAKE_CASE )]
for u, v in edges:
g[u].append(SCREAMING_SNAKE_CASE )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase : int = 7
UpperCAmelCase : Union[str, Any] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase : Optional[Any] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase : Tuple = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase : Any = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 95 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int:
"""simple docstring"""
if isinstance(__snake_case, torch.Tensor ):
return image
elif isinstance(__snake_case, PIL.Image.Image ):
_UpperCamelCase = [image]
if isinstance(image[0], PIL.Image.Image ):
_UpperCamelCase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
_UpperCamelCase = np.concatenate(__snake_case, axis=0 )
_UpperCamelCase = np.array(__snake_case ).astype(np.floataa ) / 255.0
_UpperCamelCase = image.transpose(0, 3, 1, 2 )
_UpperCamelCase = 2.0 * image - 1.0
_UpperCamelCase = torch.from_numpy(__snake_case )
elif isinstance(image[0], torch.Tensor ):
_UpperCamelCase = torch.cat(__snake_case, dim=0 )
return image
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=0.9995 ) -> List[Any]:
"""simple docstring"""
if not isinstance(__snake_case, np.ndarray ):
_UpperCamelCase = True
_UpperCamelCase = va.device
_UpperCamelCase = va.cpu().numpy()
_UpperCamelCase = va.cpu().numpy()
_UpperCamelCase = np.sum(va * va / (np.linalg.norm(__snake_case ) * np.linalg.norm(__snake_case )) )
if np.abs(__snake_case ) > DOT_THRESHOLD:
_UpperCamelCase = (1 - t) * va + t * va
else:
_UpperCamelCase = np.arccos(__snake_case )
_UpperCamelCase = np.sin(__snake_case )
_UpperCamelCase = theta_a * t
_UpperCamelCase = np.sin(__snake_case )
_UpperCamelCase = np.sin(theta_a - theta_t ) / sin_theta_a
_UpperCamelCase = sin_theta_t / sin_theta_a
_UpperCamelCase = sa * va + sa * va
if inputs_are_torch:
_UpperCamelCase = torch.from_numpy(__snake_case ).to(__snake_case )
return va
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = F.normalize(__snake_case, dim=-1 )
_UpperCamelCase = F.normalize(__snake_case, dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[int]:
"""simple docstring"""
for param in model.parameters():
_UpperCamelCase = value
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a , __a , __a , __a , __a , __a , __a=None , __a=None , __a=None , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__a , text_encoder=__a , clip_model=__a , tokenizer=__a , unet=__a , scheduler=__a , feature_extractor=__a , coca_model=__a , coca_tokenizer=__a , coca_transform=__a , )
_UpperCamelCase = (
feature_extractor.size
if isinstance(feature_extractor.size , __a)
else feature_extractor.size['''shortest_edge''']
)
_UpperCamelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std)
set_requires_grad(self.text_encoder , __a)
set_requires_grad(self.clip_model , __a)
def UpperCAmelCase ( self , __a = "auto") -> Union[str, Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.enable_attention_slicing(__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
set_requires_grad(self.vae , __a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
set_requires_grad(self.vae , __a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
set_requires_grad(self.unet , __a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
set_requires_grad(self.unet , __a)
def UpperCAmelCase ( self , __a , __a , __a) -> Any:
'''simple docstring'''
# get the original timestep using init_timestep
_UpperCamelCase = min(int(num_inference_steps * strength) , __a)
_UpperCamelCase = max(num_inference_steps - init_timestep , 0)
_UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a=None) -> Tuple:
'''simple docstring'''
if not isinstance(__a , torch.Tensor):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(__a)}''')
_UpperCamelCase = image.to(device=__a , dtype=__a)
if isinstance(__a , __a):
_UpperCamelCase = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(__a)
]
_UpperCamelCase = torch.cat(__a , dim=0)
else:
_UpperCamelCase = self.vae.encode(__a).latent_dist.sample(__a)
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase = 0.1_8215 * init_latents
_UpperCamelCase = init_latents.repeat_interleave(__a , dim=0)
_UpperCamelCase = randn_tensor(init_latents.shape , generator=__a , device=__a , dtype=__a)
# get latents
_UpperCamelCase = self.scheduler.add_noise(__a , __a , __a)
_UpperCamelCase = init_latents
return latents
def UpperCAmelCase ( self , __a) -> str:
'''simple docstring'''
_UpperCamelCase = self.coca_transform(__a).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
_UpperCamelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype))
_UpperCamelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split('''<end_of_text>''')[0].replace('''<start_of_text>''' , '''''').rstrip(''' .,''')
def UpperCAmelCase ( self , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.feature_extractor.preprocess(__a)
_UpperCamelCase = torch.from_numpy(clip_image_input['''pixel_values'''][0]).unsqueeze(0).to(self.device).half()
_UpperCamelCase = self.clip_model.get_image_features(__a)
_UpperCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__a)
_UpperCamelCase = image_embeddings_clip.repeat_interleave(__a , dim=0)
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = latents.detach().requires_grad_()
_UpperCamelCase = self.scheduler.scale_model_input(__a , __a)
# predict the noise residual
_UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
_UpperCamelCase = self.scheduler.alphas_cumprod[timestep]
_UpperCamelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCamelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_UpperCamelCase = torch.sqrt(__a)
_UpperCamelCase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.sigmas[index]
_UpperCamelCase = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler)} not supported''')
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase = 1 / 0.1_8215 * sample
_UpperCamelCase = self.vae.decode(__a).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = transforms.Resize(self.feature_extractor_size)(__a)
_UpperCamelCase = self.normalize(__a).to(latents.dtype)
_UpperCamelCase = self.clip_model.get_image_features(__a)
_UpperCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__a)
_UpperCamelCase = spherical_dist_loss(__a , __a).mean() * clip_guidance_scale
_UpperCamelCase = -torch.autograd.grad(__a , __a)[0]
if isinstance(self.scheduler , __a):
_UpperCamelCase = latents.detach() + grads * (sigma**2)
_UpperCamelCase = noise_pred_original
else:
_UpperCamelCase = noise_pred_original - torch.sqrt(__a) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , __a , __a , __a = None , __a = None , __a = 5_12 , __a = 5_12 , __a = 0.6 , __a = 50 , __a = 7.5 , __a = 1 , __a = 0.0 , __a = 1_00 , __a = None , __a = "pil" , __a = True , __a = 0.8 , __a = 0.1 , __a = 0.1 , ) -> Dict:
'''simple docstring'''
if isinstance(__a , __a) and len(__a) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(__a)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(__a , torch.Generator) and batch_size > 1:
_UpperCamelCase = [generator] + [None] * (batch_size - 1)
_UpperCamelCase = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_UpperCamelCase = [x[0] for x in coca_is_none if x[1]]
_UpperCamelCase = ''', '''.join(__a)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__a):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_UpperCamelCase = self.get_image_description(__a)
if style_prompt is None:
if len(__a):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_UpperCamelCase = self.get_image_description(__a)
# get prompt text embeddings for content and style
_UpperCamelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_UpperCamelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_UpperCamelCase = slerp(__a , __a , __a)
# duplicate text embeddings for each generation per prompt
_UpperCamelCase = text_embeddings.repeat_interleave(__a , dim=0)
# set timesteps
_UpperCamelCase = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_UpperCamelCase = {}
if accepts_offset:
_UpperCamelCase = 1
self.scheduler.set_timesteps(__a , **__a)
# Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device)
_UpperCamelCase , _UpperCamelCase = self.get_timesteps(__a , __a , self.device)
_UpperCamelCase = timesteps[:1].repeat(__a)
# Preprocess image
_UpperCamelCase = preprocess(__a , __a , __a)
_UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a)
_UpperCamelCase = preprocess(__a , __a , __a)
_UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a)
_UpperCamelCase = slerp(__a , __a , __a)
if clip_guidance_scale > 0:
_UpperCamelCase = self.get_clip_image_embeddings(__a , __a)
_UpperCamelCase = self.get_clip_image_embeddings(__a , __a)
_UpperCamelCase = slerp(
__a , __a , __a)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase = content_text_input.input_ids.shape[-1]
_UpperCamelCase = self.tokenizer([''''''] , padding='''max_length''' , max_length=__a , return_tensors='''pt''')
_UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_UpperCamelCase = uncond_embeddings.repeat_interleave(__a , dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCamelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_UpperCamelCase = torch.randn(__a , generator=__a , device='''cpu''' , dtype=__a).to(
self.device)
else:
_UpperCamelCase = torch.randn(__a , generator=__a , device=self.device , dtype=__a)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_UpperCamelCase = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
_UpperCamelCase = {}
if accepts_eta:
_UpperCamelCase = eta
# check if the scheduler accepts generator
_UpperCamelCase = '''generator''' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_UpperCamelCase = generator
with self.progress_bar(total=__a):
for i, t in enumerate(__a):
# expand the latents if we are doing classifier free guidance
_UpperCamelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_UpperCamelCase = self.scheduler.scale_model_input(__a , __a)
# predict the noise residual
_UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase , _UpperCamelCase = noise_pred.chunk(2)
_UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_UpperCamelCase = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_UpperCamelCase , _UpperCamelCase = self.cond_fn(
__a , __a , __a , __a , __a , __a , __a , )
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(__a , __a , __a , **__a).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase = 1 / 0.1_8215 * latents
_UpperCamelCase = self.vae.decode(__a).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(__a)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a)
| 194 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def _lowerCamelCase(__UpperCamelCase = "https://www.worldometers.info/coronavirus" ) -> dict:
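    # scrape the headline counters (total cases, deaths, recovered, ...) from the
    # worldometers page and return them as a {label: value} dict of strings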
_lowerCAmelCase =BeautifulSoup(requests.get(__UpperCamelCase ).text , """html.parser""" )
_lowerCAmelCase =soup.findAll("""h1""" )
_lowerCAmelCase =soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(__UpperCamelCase , __UpperCamelCase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
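# the mapping above drives the `_LazyModule` at the bottom of the file, which defers the
# heavy torch imports until one of these attributes is first accessed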
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __lowercase ( lowerCamelCase : List[str] , lowerCamelCase : List[str] ):
with open(lowerCamelCase , 'r' , encoding='utf-8' ) as f:
UpperCamelCase_ : Optional[int] = json.loads(f.read() )
UpperCamelCase_ : int = collections.OrderedDict()
UpperCamelCase_ : Any = collections.OrderedDict()
UpperCamelCase_ : Optional[int] = collections.OrderedDict()
with open(lowerCamelCase , 'r' , encoding='utf-8' ) as f:
UpperCamelCase_ : Union[str, Any] = f.readlines()
UpperCamelCase_ : Optional[int] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCamelCase ):
UpperCamelCase_ : Any = b
UpperCamelCase_ : str = idx
for wd in b:
UpperCamelCase_ : List[str] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class _lowercase ( snake_case_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , snake_case : Optional[int] , snake_case : Any , snake_case : List[Any]="<|endoftext|>" , snake_case : str="<|endoftext|>" , snake_case : List[str]="<|startoftext|>" , snake_case : str="<|endoftext|>" , snake_case : str=False , **snake_case : Optional[Any] , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=snake_case , pad_token=snake_case , bos_token=snake_case , eos_token=snake_case , do_clean_text=snake_case , **snake_case , )
if not os.path.isfile(snake_case ):
raise ValueError(
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case ):
raise ValueError(
f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
UpperCamelCase_ : Tuple = do_clean_text
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : List[str] = load_vocab_and_emoji(snake_case , snake_case )
UpperCamelCase_ : int = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
"""simple docstring"""
return len(self.raw_vocab )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Optional[int] ) -> str:
"""simple docstring"""
return self.subword_tokenizer.tokenize(snake_case , clean=self.do_clean_text )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return self.vocab.get(snake_case , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = ''.join(snake_case ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : "Conversation" ) -> List[int]:
"""simple docstring"""
UpperCamelCase_ : int = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case , add_special_tokens=snake_case ) + [self.eos_token_id] )
if len(snake_case ) > self.model_max_length:
UpperCamelCase_ : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCamelCase_ : List[str] = 0
if os.path.isdir(snake_case ):
UpperCamelCase_ : Tuple = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase_ : Dict = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
UpperCamelCase_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
UpperCamelCase_ : Any = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
' Please check that the vocabulary is not corrupted!' )
UpperCamelCase_ : Union[str, Any] = token_index
writer.write(','.join(snake_case ) + '\n' )
index += 1
with open(snake_case , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , snake_case )
return vocab_file, emoji_file
class _lowercase ( snake_case_ ):
def __init__( self : Union[str, Any] , snake_case : List[str] , snake_case : int , snake_case : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : int = vocab # same as swe
UpperCamelCase_ : Tuple = ids_to_tokens # same as bpe
UpperCamelCase_ : List[Any] = emoji
UpperCamelCase_ : Optional[int] = np.max([len(snake_case ) for w in self.vocab.keys()] )
UpperCamelCase_ : Dict = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
UpperCamelCase_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
UpperCamelCase_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
UpperCamelCase_ : str = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
UpperCamelCase_ : List[str] = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
UpperCamelCase_ : Optional[Any] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
UpperCamelCase_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
UpperCamelCase_ : str = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
UpperCamelCase_ : Any = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : Dict ) -> Any:
"""simple docstring"""
return len(self.ids_to_tokens )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Dict ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = self.content_repattera.sub('<URL>' , snake_case )
UpperCamelCase_ : Optional[int] = self.content_repattera.sub('<EMAIL>' , snake_case )
UpperCamelCase_ : Any = self.content_repattera.sub('<TEL>' , snake_case )
UpperCamelCase_ : Any = self.content_repattera.sub('<DATE>' , snake_case )
UpperCamelCase_ : Union[str, Any] = self.content_repattera.sub('<DATE>' , snake_case )
UpperCamelCase_ : int = self.content_repattera.sub('<PRICE>' , snake_case )
UpperCamelCase_ : Any = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCamelCase_ : Optional[Any] = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : str , snake_case : Dict=False ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = text.replace(' ' , '<SP>' )
UpperCamelCase_ : Any = text.replace(' ' , '<SP>' )
UpperCamelCase_ : List[str] = text.replace('\r\n' , '<BR>' )
UpperCamelCase_ : Union[str, Any] = text.replace('\n' , '<BR>' )
UpperCamelCase_ : List[Any] = text.replace('\r' , '<BR>' )
UpperCamelCase_ : Dict = text.replace('\t' , '<TAB>' )
UpperCamelCase_ : List[Any] = text.replace('—' , 'ー' )
UpperCamelCase_ : Dict = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCamelCase_ : str = text.replace(snake_case , snake_case )
if clean:
UpperCamelCase_ : List[str] = self.clean_text(snake_case )
def check_simbol(snake_case : Optional[Any] ):
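            # heuristic: True when x is a single character whose 2-byte UTF-8 encoding falls
            # in certain symbol/diacritic ranges (such characters become the <KIGOU> token)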
UpperCamelCase_ : Dict = x.encode()
if len(snake_case ) == 1 and len(snake_case ) == 2:
UpperCamelCase_ : List[str] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(snake_case : Optional[Any] ):
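            # True for single characters whose 3-byte UTF-8 encoding falls roughly in the
            # U+2000-U+2BFF punctuation/symbol blocks (mapped to the <U2000U2BFF> token)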
UpperCamelCase_ : Optional[Any] = x.encode()
if len(snake_case ) == 1 and len(snake_case ) == 3:
UpperCamelCase_ : Optional[int] = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_8080 and c <= 0XE2_B07F:
return True
return False
UpperCamelCase_ : Any = 0
UpperCamelCase_ : Optional[int] = []
while pos < len(snake_case ):
UpperCamelCase_ : Optional[int] = min(len(snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
UpperCamelCase_ : Union[str, Any] = [] # (token_id, token, pos)
for e in range(snake_case , snake_case , -1 ):
UpperCamelCase_ : Optional[Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case ) > 2:
UpperCamelCase_ : Dict = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case ) > 0:
# the smallest token_id is adopted
                    UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : int = sorted(snake_case , key=lambda snake_case : snake_case[0] )[0]
result.append(snake_case )
UpperCamelCase_ : int = e
else:
UpperCamelCase_ : List[Any] = pos + 1
UpperCamelCase_ : Union[str, Any] = text[pos:end]
if check_simbol(snake_case ):
result.append('<KIGOU>' )
elif checkuae(snake_case ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
UpperCamelCase_ : int = end
return result
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Optional[Any] , snake_case : List[Any]="\n" ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = []
UpperCamelCase_ : Optional[Any] = []
UpperCamelCase_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case ) > 0:
words.append(bytearray(snake_case ).decode('utf-8' , errors='replace' ) )
UpperCamelCase_ : Any = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case )
if len(snake_case ) > 0:
words.append(bytearray(snake_case ).decode('utf-8' , errors='replace' ) )
UpperCamelCase_ : Dict = ''.join(snake_case )
return text
| 175 |
from __future__ import annotations
def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] ): # noqa: E741
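    # binary search: returns the smallest index r in (l, r] with v[r] >= key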
while r - l > 1:
UpperCamelCase_ : Union[str, Any] = (l + r) // 2
if v[m] >= key:
UpperCamelCase_ : str = m
else:
UpperCamelCase_ : List[Any] = m # noqa: E741
return r
def __lowercase ( lowerCamelCase : list[int] ):
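    # O(n log n) longest-increasing-subsequence length via patience sorting: tail[k] is
    # meant to hold the smallest possible tail of an increasing subsequence of length k + 1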
if len(lowerCamelCase ) == 0:
return 0
UpperCamelCase_ : Tuple = [0] * len(lowerCamelCase )
UpperCamelCase_ : int = 1
UpperCamelCase_ : Dict = v[0]
for i in range(1 , len(lowerCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase_ : Any = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase_ : Dict = v[i]
length += 1
else:
UpperCamelCase_ : List[str] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 | 1 |
'''simple docstring'''
lowercase__ = 9.8_06_65
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = g ) -> float:
'''simple docstring'''
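    # Archimedes' principle: buoyant force = fluid density * gravity * displaced volume,
    # in newtons when SI units are used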
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 83 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = """efficientnet"""
def __init__( self : str , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 600 , UpperCamelCase__ : float = 2.0 , UpperCamelCase__ : float = 3.1 , UpperCamelCase__ : int = 8 , UpperCamelCase__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCamelCase__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCamelCase__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCamelCase__ : List[int] = [] , UpperCamelCase__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCamelCase__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCamelCase__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCamelCase__ : float = 0.25 , UpperCamelCase__ : str = "swish" , UpperCamelCase__ : int = 2560 , UpperCamelCase__ : str = "mean" , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : float = 0.001 , UpperCamelCase__ : float = 0.99 , UpperCamelCase__ : float = 0.5 , UpperCamelCase__ : float = 0.2 , **UpperCamelCase__ : Any , ) -> Any:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
snake_case : Dict = num_channels
snake_case : List[Any] = image_size
snake_case : Any = width_coefficient
snake_case : int = depth_coefficient
snake_case : List[str] = depth_divisor
snake_case : Tuple = kernel_sizes
snake_case : Optional[Any] = in_channels
snake_case : Optional[Any] = out_channels
snake_case : Dict = depthwise_padding
snake_case : Optional[Any] = strides
snake_case : List[str] = num_block_repeats
snake_case : Any = expand_ratios
snake_case : Any = squeeze_expansion_ratio
snake_case : Optional[Any] = hidden_act
snake_case : Optional[int] = hidden_dim
snake_case : Dict = pooling_type
snake_case : Any = initializer_range
snake_case : Optional[Any] = batch_norm_eps
snake_case : Tuple = batch_norm_momentum
snake_case : Any = dropout_rate
snake_case : str = drop_connect_rate
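        # total hidden layers reported by the config; presumably each repeated block is
        # counted as four sub-layers (expansion, depthwise, squeeze-excite, projection)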
snake_case : Dict = sum(UpperCamelCase__ ) * 4
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = version.parse("""1.11""" )
@property
def lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self : Tuple ) -> float:
"""simple docstring"""
return 1e-5
| 83 | 1 |
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : list[int] ) -> int:
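    # Kadane-style scan tracking both the maximum and minimum products ending at each
    # index, since multiplying by a negative number can turn the minimum into the maximum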
if not numbers:
return 0
if not isinstance(UpperCAmelCase__ , (list, tuple) ) or not all(
isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
lowercase_ : List[str] = numbers[0]
for i in range(1 , len(UpperCAmelCase__ ) ):
# update the maximum and minimum subarray products
lowercase_ : Tuple = numbers[i]
if number < 0:
lowercase_ , lowercase_ : Union[str, Any] = min_till_now, max_till_now
lowercase_ : Dict = max(UpperCAmelCase__ , max_till_now * number )
lowercase_ : Any = min(UpperCAmelCase__ , min_till_now * number )
# update the maximum product found till now
lowercase_ : Tuple = max(UpperCAmelCase__ , UpperCAmelCase__ )
return max_prod
| 239 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowercase : Optional[int] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowercase : List[Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowercase : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Any=None , lowercase_ : str=None , lowercase_ : Dict=None , lowercase_ : Any=None , lowercase_ : int="auto" , lowercase_ : Tuple=-1 , lowercase_ : str=0.9 , lowercase_ : Union[str, Any]=5 , lowercase_ : List[str]=500 , lowercase_ : Union[str, Any]="gpt2-large" , lowercase_ : List[Any]=-1 , lowercase_ : str=1024 , lowercase_ : List[str]=25 , lowercase_ : str=5 , lowercase_ : List[Any]=True , lowercase_ : Tuple=25 , ):
lowercase_ : List[str] = compute_mauve(
p_text=lowercase_ , q_text=lowercase_ , p_features=lowercase_ , q_features=lowercase_ , p_tokens=lowercase_ , q_tokens=lowercase_ , num_buckets=lowercase_ , pca_max_data=lowercase_ , kmeans_explained_var=lowercase_ , kmeans_num_redo=lowercase_ , kmeans_max_iter=lowercase_ , featurize_model_name=lowercase_ , device_id=lowercase_ , max_text_length=lowercase_ , divergence_curve_discretization_size=lowercase_ , mauve_scaling_factor=lowercase_ , verbose=lowercase_ , seed=lowercase_ , )
return out
| 239 | 1 |
'''simple docstring'''
def a__ ( lowercase : int ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
_UpperCamelCase = 4
_UpperCamelCase = (1 << p) - 1
for _ in range(p - 2 ):
_UpperCamelCase = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 356 |
'''simple docstring'''
def a__ ( lowercase : int, lowercase : int ) -> int:
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(lowercase, x % y )
def a__ ( lowercase : int, lowercase : int ) -> int:
"""simple docstring"""
return (x * y) // greatest_common_divisor(lowercase, lowercase )
def a__ ( lowercase : int = 20 ) -> int:
"""simple docstring"""
_UpperCamelCase = 1
for i in range(1, n + 1 ):
_UpperCamelCase = lcm(lowercase, lowercase )
return g
if __name__ == "__main__":
print(F"""{solution() = }""")
| 287 | 0 |
"""simple docstring"""
import math
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> str:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
while num > 0:
SCREAMING_SNAKE_CASE = num % 8
SCREAMING_SNAKE_CASE = octal + (remainder * math.floor(math.pow(10 , __lowerCamelCase ) ))
counter += 1
        SCREAMING_SNAKE_CASE = math.floor(num / 8 )  # integer-divide num by 8 (i.e. num //= 8, dropping any remainder)
# This formatting removes trailing '.0' from `octal`.
return F'0o{int(__lowerCamelCase )}'
def lowercase () -> None:
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_16 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 113 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
@dataclass
class a_ :
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = field(default_factory=snake_case_ )
UpperCamelCase = field(default_factory=snake_case_ )
def snake_case_( self , A , A , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = len(list(m.modules() ) ) == 1 or isinstance(A , nn.Convad ) or isinstance(A , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A )
def __call__( self , A ) -> str:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A )
[x.remove() for x in self.handles]
return self
@property
def snake_case_( self ) -> str:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a_ :
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 0
UpperCamelCase = field(default_factory=snake_case_ )
UpperCamelCase = field(default_factory=snake_case_ )
def __call__( self , A ) -> List[str]:
_SCREAMING_SNAKE_CASE = Tracker(self.dest )(A ).parametrized
_SCREAMING_SNAKE_CASE = Tracker(self.src )(A ).parametrized
_SCREAMING_SNAKE_CASE = list(filter(lambda A : type(A ) not in self.src_skip , A ) )
_SCREAMING_SNAKE_CASE = list(filter(lambda A : type(A ) not in self.dest_skip , A ) )
if len(A ) != len(A ):
raise Exception(
f'Numbers of operations are different. Source module has {len(A )} operations while'
f' destination module has {len(A )}.' )
for dest_m, src_m in zip(A , A ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : ResNetConfig , __lowerCamelCase : Path , __lowerCamelCase : bool = True ) ->int:
print(F'Converting {name}...' )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase ).eval()
_SCREAMING_SNAKE_CASE = ResNetForImageClassification(__lowerCamelCase ).eval()
_SCREAMING_SNAKE_CASE = ModuleTransfer(src=__lowerCamelCase , dest=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.randn((1, 3, 224, 224) )
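        # one dummy forward pass makes the Tracker hooks fire so both models record a
        # matching sequence of parametrized operations before the weights are copied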
module_transfer(__lowerCamelCase )
assert torch.allclose(from_model(__lowerCamelCase ) , our_model(__lowerCamelCase ).logits ), "The model logits don't match the original one."
_SCREAMING_SNAKE_CASE = F'resnet{"-".join(name.split("resnet" ) )}'
print(__lowerCamelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=__lowerCamelCase , )
# we can use the convnext one
_SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=__lowerCamelCase , )
print(F'Pushed {checkpoint_name}' )
def lowerCamelCase ( __lowerCamelCase : Path , __lowerCamelCase : str = None , __lowerCamelCase : bool = True ) ->Any:
_SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
_SCREAMING_SNAKE_CASE = 1000
_SCREAMING_SNAKE_CASE = (1, num_labels)
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
_SCREAMING_SNAKE_CASE = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(__lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowercase_ = parser.parse_args()
lowercase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
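# Example invocation (editor's sketch; the script filename is an assumption):
#   python convert_resnet_timm_to_hf.py --model_name resnet50 --pytorch_dump_folder_path ./dump
# Caveat: because --push_to_hub is declared with `type=bool`, any non-empty string
# (even "False") parses as True; omit the flag entirely to keep the default.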
| 58 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor( ProcessorMixin ):
"""simple docstring"""
__lowerCAmelCase = "WhisperFeatureExtractor"
__lowerCAmelCase = "WhisperTokenizer"
def __init__( self , feature_extractor , tokenizer ):
super().__init__(feature_extractor , tokenizer )
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
def __call__( self , *args , **kwargs ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args , **kwargs )
audio = kwargs.pop('''audio''' , None )
sampling_rate = kwargs.pop('''sampling_rate''' , None )
text = kwargs.pop('''text''' , None )
if len(args ) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
if text is not None:
encodings = self.tokenizer(text , **kwargs )
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs['''labels'''] = encodings['''input_ids''']
return inputs
def batch_decode( self , *args , **kwargs ):
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
return self.tokenizer.decode(*args , **kwargs )
def get_prompt_ids( self , text , return_tensors="np" ):
return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
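# Usage sketch (editor's note; the checkpoint is one public example and
# `waveform` is a placeholder for a 16 kHz mono audio array):
#
#     from transformers import WhisperProcessor
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#     labels = processor(text="a transcription", return_tensors="pt").input_ids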
| 363 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline( ChunkPipeline ):
"""simple docstring"""
def __init__( self , **__A ) -> Dict:
super().__init__(**__A )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
self.check_model_type(__A )
def __call__( self , image , candidate_labels = None , **kwargs , ):
if "text_queries" in kwargs:
candidate_labels = kwargs.pop('''text_queries''' )
if isinstance(image , (str, Image.Image) ):
inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
inputs = image
results = super().__call__(inputs , **kwargs )
return results
def _sanitize_parameters( self , **kwargs ):
postprocess_params = {}
if "threshold" in kwargs:
postprocess_params['''threshold'''] = kwargs['''threshold''']
if "top_k" in kwargs:
postprocess_params['''top_k'''] = kwargs['''top_k''']
return {}, {}, postprocess_params
def preprocess( self , inputs ):
image = load_image(inputs['''image'''] )
candidate_labels = inputs['''candidate_labels''']
if isinstance(candidate_labels , str ):
candidate_labels = candidate_labels.split(''',''' )
target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int64 )
for i, candidate_label in enumerate(candidate_labels ):
text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
image_features = self.image_processor(image , return_tensors=self.framework )
yield {
"is_last": i == len(candidate_labels ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _forward( self , model_inputs ):
target_size = model_inputs.pop('''target_size''' )
candidate_label = model_inputs.pop('''candidate_label''' )
is_last = model_inputs.pop('''is_last''' )
outputs = self.model(**model_inputs )
model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
results = []
for model_output in model_outputs:
label = model_output['''candidate_label''']
model_output = BaseModelOutput(model_output )
outputs = self.image_processor.post_process_object_detection(
outputs=model_output , threshold=threshold , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
score = outputs['''scores'''][index].item()
box = self._get_bounding_box(outputs['''boxes'''][index][0] )
result = {'''score''': score, '''label''': label, '''box''': box}
results.append(result )
results = sorted(results , key=lambda x : x["score"] , reverse=True )
if top_k:
results = results[:top_k]
return results
def _get_bounding_box( self , box ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
xmin , ymin , xmax , ymax = box.int().tolist()
bbox = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
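# Usage sketch (editor's note; the checkpoint and image URL are illustrative):
#
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{'score': ..., 'label': 'cat', 'box': {'xmin': ..., ...}}, ...]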
| 215 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats( _SCREAMING_SNAKE_CASE = "https://www.worldometers.info/coronavirus" ):
soup = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , """html.parser""" )
keys = soup.findAll("""h1""" )
values = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 341 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester :
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=10 , UpperCAmelCase=3 , UpperCAmelCase=32 * 4 , UpperCAmelCase=32 * 6 , UpperCAmelCase=4 , UpperCAmelCase=32 , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = is_training
_snake_case = use_auxiliary_loss
_snake_case = num_queries
_snake_case = num_channels
_snake_case = min_size
_snake_case = max_size
_snake_case = num_labels
_snake_case = mask_feature_size
def lowercase (self ) -> str:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase )
_snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase )
_snake_case = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase ) > 0.5
).float()
_snake_case = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase ) > 0.5).long()
_snake_case = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase (self ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase (self ) -> Optional[Any]:
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.prepare_config_and_inputs()
_snake_case = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int:
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase ) , config.decoder_config.decoder_layers )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]:
with torch.no_grad():
_snake_case = MaskFormerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
_snake_case = model(UpperCAmelCase , output_hidden_states=UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase , UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
_snake_case = MaskFormerForInstanceSegmentation(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
def comm_check_on_output(UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
_snake_case = model(UpperCAmelCase )
comm_check_on_output(UpperCAmelCase )
_snake_case = model(
pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
comm_check_on_output(UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowercase (self ) -> int:
_snake_case = MaskFormerModelTester(self )
_snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def lowercase (self ) -> int:
self.config_tester.run_common_tests()
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )
def lowercase (self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowercase (self ) -> Optional[int]:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowercase (self ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowercase (self ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase (self ) -> Tuple:
pass
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
@slow
def lowercase (self ) -> int:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_snake_case = MaskFormerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase (self ) -> Tuple:
_snake_case = (self.model_tester.min_size,) * 2
_snake_case = {
"""pixel_values""": torch.randn((2, 3, *size) , device=UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=UpperCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=UpperCAmelCase ).long(),
}
_snake_case = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase )
_snake_case = model(**UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowercase (self ) -> Dict:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase ).to(UpperCAmelCase )
_snake_case = model(**UpperCAmelCase , output_attentions=UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase (self ) -> Tuple:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_snake_case = self.all_model_classes[1]
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
_snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ).loss
loss.backward()
def lowercase (self ) -> List[str]:
# only MaskFormerForInstanceSegmentation has the loss
_snake_case = self.all_model_classes[1]
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
_snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1E-4
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class MaskFormerModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase (self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowercase (self ) -> str:
_snake_case = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
_snake_case = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
_snake_case = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
_snake_case = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> List[str]:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
_snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> List[Any]:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> Tuple:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="""pt""" , )
_snake_case = inputs["""pixel_values"""].to(UpperCAmelCase )
_snake_case = [el.to(UpperCAmelCase ) for el in inputs["""mask_labels"""]]
_snake_case = [el.to(UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 341 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
warnings = None
try:
import msvcrt
except ImportError:
msvcrt = None
try:
import fcntl
except ImportError:
fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__version__ = """3.0.12"""
_logger = None
def logger():
global _logger
_logger = _logger or logging.getLogger(__name__)
return _logger
class Timeout( TimeoutError ):
"""simple docstring"""
def __init__(self , lock_file ):
self.lock_file = lock_file
return None
def __str__(self ):
temp = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _Acquire_ReturnProxy :
"""simple docstring"""
def __init__(self , lock ):
self.lock = lock
return None
def __enter__(self ):
return self.lock
def __exit__(self , __lowercase , __lowercase , __lowercase ):
self.lock.release()
return None
class BaseFileLock :
"""simple docstring"""
def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
max_filename_length = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
# The path to the lock file.
self._lock_file = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
self._lock_file_fd = None
# The default timeout value.
self._timeout = timeout
# We use this lock primarily for the lock counter.
self._thread_lock = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
self._lock_counter = 0
return None
@property
def lock_file(self ):
return self._lock_file
@property
def timeout(self ):
return self._timeout
@timeout.setter
def timeout(self , value ):
self._timeout = float(value )
return None
def _acquire(self ):
raise NotImplementedError()
def _release(self ):
raise NotImplementedError()
@property
def is_locked(self ):
return self._lock_file_fd is not None
def acquire(self , timeout=None , poll_intervall=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
timeout = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lock_id = id(self )
lock_filename = self._lock_file
start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(poll_intervall )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
self._lock_counter = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def release(self , force=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lock_id = id(self )
lock_filename = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
self._lock_counter = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
self.acquire()
return self
def __exit__(self , __lowercase , __lowercase , __lowercase ):
self.release()
return None
def __del__(self ):
self.release(force=True )
return None
def hash_filename_if_too_long(self , path , max_length ):
filename = os.path.basename(path )
if len(filename ) > max_length and max_length > 0:
dirname = os.path.dirname(path )
hashed_filename = str(hash(filename ) )
filename = filename[: max_length - len(hashed_filename ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(dirname , filename )
else:
return path
class WindowsFileLock( BaseFileLock ):
"""simple docstring"""
def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
from .file_utils import relative_to_absolute_path
super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def _acquire(self ):
open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
fd = os.open(self._lock_file , open_mode )
except OSError:
pass
else:
try:
msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(fd )
else:
self._lock_file_fd = fd
return None
def _release(self ):
fd = self._lock_file_fd
self._lock_file_fd = None
msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
os.close(fd )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock( BaseFileLock ):
"""simple docstring"""
def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
def _acquire(self ):
open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
fd = os.open(self._lock_file , open_mode )
try:
fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(fd )
else:
self._lock_file_fd = fd
return None
def _release(self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
fd = self._lock_file_fd
self._lock_file_fd = None
fcntl.flock(fd , fcntl.LOCK_UN )
os.close(fd )
return None
class SoftFileLock( BaseFileLock ):
"""simple docstring"""
def _acquire(self ):
open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
fd = os.open(self._lock_file , open_mode )
except OSError:
pass
else:
self._lock_file_fd = fd
return None
def _release(self ):
os.close(self._lock_file_fd )
self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
FileLock = WindowsFileLock
elif fcntl:
FileLock = UnixFileLock
else:
FileLock = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
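# Usage sketch (editor's note; the lock-file path is hypothetical):
#
#     lock = FileLock("/tmp/shared_resource.lock", timeout=5)
#     try:
#         with lock:
#             ...  # exclusive access to the shared resource
#     except Timeout:
#         ...  # another process held the lock for more than 5 seconds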
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a, b):
if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
raise Exception('''Matrices are not 2x2''')
new_matrix = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def matrix_addition(matrix_a, matrix_b):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
for row in range(len(matrix_a))
]
def matrix_subtraction(matrix_a, matrix_b):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
for row in range(len(matrix_a))
]
def split_matrix(a):
if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
raise Exception('''Odd matrices are not supported!''')
matrix_length = len(a)
mid = matrix_length // 2
top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
bot_right = [
[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
]
top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix):
return len(matrix), len(matrix[0])
def print_matrix(matrix):
print('''\n'''.join(str(line) for line in matrix))
def actual_strassen(matrix_a, matrix_b):
if matrix_dimensions(matrix_a) == (2, 2):
return default_matrix_multiplication(matrix_a, matrix_b)
a, b, c, d = split_matrix(matrix_a)
e, f, g, h = split_matrix(matrix_b)
t1 = actual_strassen(a, matrix_subtraction(f, h))
t2 = actual_strassen(matrix_addition(a, b), h)
t3 = actual_strassen(matrix_addition(c, d), e)
t4 = actual_strassen(d, matrix_subtraction(g, e))
t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
top_right = matrix_addition(t1, t2)
bot_left = matrix_addition(t3, t4)
bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
# construct the new matrix from our 4 quadrants
new_matrix = []
for i in range(len(top_right)):
new_matrix.append(top_left[i] + top_right[i])
for i in range(len(bot_right)):
new_matrix.append(bot_left[i] + bot_right[i])
return new_matrix
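# Editor's note: with the block names above (matrix_a split into a, b / c, d and
# matrix_b into e, f / g, h), t1..t7 are the seven Strassen products:
#   t1 = a(f-h)   t2 = (a+b)h   t3 = (c+d)e   t4 = d(g-e)
#   t5 = (a+d)(e+h)   t6 = (b-d)(g+h)   t7 = (a-c)(e+f)
# and the result blocks are C11 = t5+t4-t2+t6, C12 = t1+t2, C21 = t3+t4,
# C22 = t1+t5-t3-t7. Seven recursive multiplications instead of eight give the
# O(n^log2(7)) ~= O(n^2.81) running time.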
def strassen(matrix1, matrix2):
if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
msg = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrix1}\n"""
F"""Matrix B: {matrix2}"""
)
raise Exception(msg)
dimension1 = matrix_dimensions(matrix1)
dimension2 = matrix_dimensions(matrix2)
if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
return [matrix1, matrix2]
maximum = max(*dimension1, *dimension2)
maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
new_matrix1 = matrix1
new_matrix2 = matrix2
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0, maxim):
if i < dimension1[0]:
for _ in range(dimension1[1], maxim):
new_matrix1[i].append(0)
else:
new_matrix1.append([0] * maxim)
if i < dimension2[0]:
for _ in range(dimension2[1], maxim):
new_matrix2[i].append(0)
else:
new_matrix2.append([0] * maxim)
final_matrix = actual_strassen(new_matrix1, new_matrix2)
# Removing the additional zeros
for i in range(0, maxim):
if i < dimension1[0]:
for _ in range(dimension2[1], maxim):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrix1, matrix2))
| 9 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
snake_case_ : str = '\\n\n'
snake_case_ : str = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
snake_case_ : List[str] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
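# Editor's aside (not part of the metric): perplexity is the exponentiated mean
# negative log-likelihood of a sequence, PPL = exp(-(1/N) * sum_i log p(x_i | x_<i)).
# A minimal, self-contained sketch with made-up per-token log-probabilities:
#
#     import math
#     token_log_probs = [-2.3, -0.7, -1.1]  # hypothetical values
#     ppl = math.exp(-sum(token_log_probs) / len(token_log_probs))  # ~3.92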
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity( datasets.Metric ):
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : int=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_UpperCamelCase : int = 'cuda'
else:
_UpperCamelCase : Optional[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
_UpperCamelCase : Dict = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Dict = model.to(lowerCamelCase__ )
_UpperCamelCase : int = AutoTokenizer.from_pretrained(lowerCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_UpperCamelCase : List[Any] = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_UpperCamelCase : List[Any] = model.config.max_length - 1
else:
_UpperCamelCase : Tuple = model.config.max_length
_UpperCamelCase : Tuple = tokenizer(
lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
_UpperCamelCase : str = encodings['input_ids']
_UpperCamelCase : int = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_UpperCamelCase : List[Any] = []
_UpperCamelCase : Dict = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
_UpperCamelCase : str = min(start_index + batch_size ,len(lowerCamelCase__ ) )
_UpperCamelCase : Tuple = encoded_texts[start_index:end_index]
_UpperCamelCase : List[str] = attn_masks[start_index:end_index]
if add_start_token:
_UpperCamelCase : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
_UpperCamelCase : List[Any] = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
_UpperCamelCase : Tuple = torch.cat(
[torch.ones(bos_tokens_tensor.size() ,dtype=torch.int64 ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
_UpperCamelCase : Tuple = encoded_batch
with torch.no_grad():
_UpperCamelCase : Any = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
_UpperCamelCase : Dict = out_logits[..., :-1, :].contiguous()
_UpperCamelCase : List[str] = labels[..., 1:].contiguous()
_UpperCamelCase : Optional[int] = attn_mask[..., 1:].contiguous()
_UpperCamelCase : Union[str, Any] = torch.exp(
(loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 83 |
'''simple docstring'''
def sum_of_digits( UpperCAmelCase_ ):
n = abs(UpperCAmelCase_ )
res = 0
while n > 0:
res += n % 10
n //= 10
return res
def sum_of_digits_recursion( UpperCAmelCase_ ):
n = abs(UpperCAmelCase_ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def sum_of_digits_compact( UpperCAmelCase_ ):
return sum(int(c ) for c in str(abs(UpperCAmelCase_ ) ) )
def benchmark():
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(func , value ) -> None:
call = f'{func.__name__}({value})'
timing = timeit(f'__main__.{call}' , setup='import __main__' )
print(f'{call:56} = {func(value )} -- {timing:.4f} seconds' )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 83 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case :str = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class _A ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : Optional[Any]):
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Union[str, Any]):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''')
except HTTPError:
pass
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
__a = FlaxBertModel(__SCREAMING_SNAKE_CASE)
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token)
__a = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax')
__a = flatten_dict(unfreeze(model.params))
__a = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 , msg=F'{key} not identical')
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token)
__a = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax')
__a = flatten_dict(unfreeze(model.params))
__a = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 , msg=F'{key} not identical')
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
__a = FlaxBertModel(__SCREAMING_SNAKE_CASE)
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token)
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')
__a = flatten_dict(unfreeze(model.params))
__a = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 , msg=F'{key} not identical')
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token)
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')
__a = flatten_dict(unfreeze(model.params))
__a = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 , msg=F'{key} not identical')
def check_models_equal( model_1 , model_2 ):
models_are_equal = True
flat_params_1 = flatten_dict(model_1.params )
flat_params_2 = flatten_dict(model_2.params )
for key in flat_params_1.keys():
if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1E-4:
models_are_equal = False
return models_are_equal
@require_flax
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
__a = FlaxBertModel(__SCREAMING_SNAKE_CASE)
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
with self.assertRaises(__SCREAMING_SNAKE_CASE):
__a = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE)
self.assertTrue(check_models_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
__a = FlaxBertModel(__SCREAMING_SNAKE_CASE)
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) , max_shard_size='''10KB''')
with self.assertRaises(__SCREAMING_SNAKE_CASE):
__a = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE)
self.assertTrue(check_models_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__SCREAMING_SNAKE_CASE):
__a = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__SCREAMING_SNAKE_CASE):
__a = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
| 131 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__snake_case :Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline( Pipeline ):
def __init__( self : int , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Tuple=None):
'''simple docstring'''
__a = {}
__a = {}
if prompt is not None:
__a = prompt
if generate_kwargs is not None:
__a = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__a = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''')
__a = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Any , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=None):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
if prompt is not None:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(
F'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE)} - but expected a single string. '
'''Note also that one single text can be provided for conditional image to text generation.''')
__a = self.model.config.model_type
if model_type == "git":
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__a = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE).input_ids
__a = [self.tokenizer.cls_token_id] + input_ids
__a = torch.tensor(__SCREAMING_SNAKE_CASE).unsqueeze(0)
model_inputs.update({'''input_ids''': input_ids})
elif model_type == "pix2struct":
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
model_inputs.update(__SCREAMING_SNAKE_CASE)
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation')
else:
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
__a = None
return model_inputs
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=None):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __SCREAMING_SNAKE_CASE)
and all(x is None for x in model_inputs['''input_ids'''])
):
__a = None
if generate_kwargs is None:
__a = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__a = model_inputs.pop(self.model.main_input_name)
__a = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
return model_outputs
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = []
for output_ids in model_outputs:
__a = {
'''generated_text''': self.tokenizer.decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , )
}
records.append(__SCREAMING_SNAKE_CASE)
return records
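# Usage sketch (editor's note; the checkpoint is one public example and the
# image URL is illustrative; the caption shown is hypothetical output):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("https://example.com/parrots.png")
#     # -> [{'generated_text': 'two parrots sitting on a branch'}]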
| 131 | 1 |
'''simple docstring'''
A__ : str = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 185 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME ="""pytorch_model.bin"""
WEIGHTS_INDEX_NAME ="""pytorch_model.bin.index.json"""
ADAPTER_CONFIG_NAME ="""adapter_config.json"""
ADAPTER_WEIGHTS_NAME ="""adapter_model.bin"""
ADAPTER_SAFE_WEIGHTS_NAME ="""adapter_model.safetensors"""
TF2_WEIGHTS_NAME ="""tf_model.h5"""
TF2_WEIGHTS_INDEX_NAME ="""tf_model.h5.index.json"""
TF_WEIGHTS_NAME ="""model.ckpt"""
FLAX_WEIGHTS_NAME ="""flax_model.msgpack"""
FLAX_WEIGHTS_INDEX_NAME ="""flax_model.msgpack.index.json"""
SAFE_WEIGHTS_NAME ="""model.safetensors"""
SAFE_WEIGHTS_INDEX_NAME ="""model.safetensors.index.json"""
CONFIG_NAME ="""config.json"""
FEATURE_EXTRACTOR_NAME ="""preprocessor_config.json"""
IMAGE_PROCESSOR_NAME =FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME ="""generation_config.json"""
MODEL_CARD_NAME ="""modelcard.json"""
SENTENCEPIECE_UNDERLINE ="""▁"""
SPIECE_UNDERLINE =SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS =[
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version ( min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
            """versions of HuggingFace Transformers.""" )
| 287 | 0 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
_lowercase_uppercase_re = re.compile(r'''([a-z\d])([A-Z])''')
_single_underscore_re = re.compile(r'''(?<!_)_(?!_)''')
_multiple_underscores_re = re.compile(r'''(_{2,})''')
_split_re = r'''^\w+(\.\w+)*$'''
INVALID_WINDOWS_CHARACTERS_IN_PATH = r'''<>:/\|?*'''
def camelcase_to_snakecase ( name ):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(R'\1_\2' , name )
    name = _lowercase_uppercase_re.sub(R'\1_\2' , name )
    return name.lower()
def snakecase_to_camelcase ( name ):
    '''simple docstring'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '' )
def filename_prefix_for_name ( name ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split ( name , split ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
    if not re.match(_split_re , split ):
        raise ValueError(f"""Split name should match '{_split_re}' but got '{split}'.""" )
    return f"""{filename_prefix_for_name(name )}-{split}"""
def filepattern_for_dataset_split ( dataset_name , split , data_dir , filetype_suffix=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f""".{filetype_suffix}"""
    filepath = os.path.join(data_dir , prefix )
    return f"""{filepath}*"""
def filenames_for_dataset_split ( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f""".{filetype_suffix}"""
        return [filename]
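if __name__ == "__main__":
    # Hedged sanity check of the helpers above; the dataset/split names and the
    # output directory are made up for illustration.
    assert camelcase_to_snakecase("SquadV2") == "squad_v2"
    assert snakecase_to_camelcase("squad_v2") == "SquadV2"
    print(
        filenames_for_dataset_split(
            "/tmp", "squad_v2", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
        )
    )
    # -> ['/tmp/squad_v2-train-00000-of-00002.arrow', '/tmp/squad_v2-train-00001-of-00002.arrow']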
| 216 |
import math
import sys
def minimum_squares_to_represent_a_number ( number : int ):
    '''simple docstring'''
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
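    # Hedged spot checks for the routine above: by Lagrange's four-square
    # theorem the answer is always at most 4.
    print(minimum_squares_to_represent_a_number(25))  # 1  (25 = 5**2)
    print(minimum_squares_to_represent_a_number(37))  # 2  (37 = 36 + 1)
    print(minimum_squares_to_represent_a_number(21))  # 3  (21 = 16 + 4 + 1)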
| 216 | 1 |
__snake_case = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
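# Hedged sketch of how such a pin table is typically consumed in setup.py;
# the helper below is illustrative and not part of this module.
#
#     def deps_list(*pkgs):
#         return [__snake_case[p] for p in pkgs]
#
#     install_requires = deps_list("torch", "transformers", "numpy")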
| 259 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[Any] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = DPTConfig()
if "large" in checkpoint_url:
_UpperCAmelCase : List[Any] = 1024
_UpperCAmelCase : Optional[int] = 4096
_UpperCAmelCase : Tuple = 24
_UpperCAmelCase : List[str] = 16
_UpperCAmelCase : str = [5, 11, 17, 23]
_UpperCAmelCase : Tuple = [256, 512, 1024, 1024]
_UpperCAmelCase : List[str] = (1, 384, 384)
if "ade" in checkpoint_url:
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = 150
_UpperCAmelCase : Tuple = """huggingface/label-files"""
_UpperCAmelCase : int = """ade20k-id2label.json"""
_UpperCAmelCase : List[str] = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) ) , """r""" ) )
_UpperCAmelCase : List[Any] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCAmelCase : Tuple = idalabel
_UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : Optional[int] = [1, 150, 480, 480]
return config, expected_shape
def snake_case_ ( lowerCAmelCase_ )-> str:
'''simple docstring'''
_UpperCAmelCase : Tuple = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> Any:
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_UpperCAmelCase : int = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
_UpperCAmelCase : str = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
_UpperCAmelCase : int = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
_UpperCAmelCase : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
_UpperCAmelCase : List[str] = name.replace("""proj""" , """projection""" )
if "blocks" in name:
_UpperCAmelCase : Dict = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_UpperCAmelCase : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
_UpperCAmelCase : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_UpperCAmelCase : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
_UpperCAmelCase : Dict = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
_UpperCAmelCase : List[Any] = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
_UpperCAmelCase : Optional[int] = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
_UpperCAmelCase : List[str] = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
_UpperCAmelCase : str = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
_UpperCAmelCase : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_UpperCAmelCase : Tuple = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
_UpperCAmelCase : List[Any] = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
_UpperCAmelCase : Tuple = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
_UpperCAmelCase : int = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
_UpperCAmelCase : List[str] = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
_UpperCAmelCase : List[str] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
_UpperCAmelCase : Tuple = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
_UpperCAmelCase : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_UpperCAmelCase : List[str] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
_UpperCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
_UpperCAmelCase : int = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
_UpperCAmelCase : str = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
_UpperCAmelCase : Any = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
_UpperCAmelCase : Tuple = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
_UpperCAmelCase : Dict = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
_UpperCAmelCase : List[Any] = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
_UpperCAmelCase : List[Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase : Any = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
_UpperCAmelCase : Union[str, Any] = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Optional[int] = in_proj_weight[: config.hidden_size, :]
_UpperCAmelCase : Optional[Any] = in_proj_bias[: config.hidden_size]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase : Dict = in_proj_bias[-config.hidden_size :]
def snake_case_ ( )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase : List[str] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase : List[str] = get_dpt_config(lowerCAmelCase_ )
# load original state_dict from URL
_UpperCAmelCase : Any = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(lowerCAmelCase_ )
# rename keys
for key in state_dict.copy().keys():
_UpperCAmelCase : List[Any] = state_dict.pop(lowerCAmelCase_ )
_UpperCAmelCase : Dict = val
# read in qkv matrices
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
# load HuggingFace model
_UpperCAmelCase : Optional[int] = DPTForSemanticSegmentation(lowerCAmelCase_ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
# Check outputs on an image
_UpperCAmelCase : Tuple = 480 if """ade""" in checkpoint_url else 384
_UpperCAmelCase : List[str] = DPTImageProcessor(size=lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : Dict = image_processor(lowerCAmelCase_ , return_tensors="""pt""" )
# forward pass
_UpperCAmelCase : Optional[Any] = model(**lowerCAmelCase_ ).logits if """ade""" in checkpoint_url else model(**lowerCAmelCase_ ).predicted_depth
# Assert logits
_UpperCAmelCase : Optional[int] = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] )
if "ade" in checkpoint_url:
_UpperCAmelCase : str = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] )
assert outputs.shape == torch.Size(lowerCAmelCase_ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCAmelCase_ )
)
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
A_ : List[Any] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
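# Hedged command-line sketch, assuming this file is saved as
# convert_dpt_to_pytorch.py (the output path is illustrative; --checkpoint_url
# already defaults to the DPT-Large MiDaS weights):
#
#     python convert_dpt_to_pytorch.py --pytorch_dump_folder_path ./dpt-large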
| 215 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : List[Any] = '▁'
__A : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model'}
__A : Optional[int] = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
__A : Any = {
'facebook/xglm-564M': 2_048,
}
class __UpperCamelCase ( lowercase__ ):
lowercase : Optional[Any] = VOCAB_FILES_NAMES
lowercase : str = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self :Dict ,_UpperCamelCase :str ,_UpperCamelCase :Optional[Any]="<s>" ,_UpperCamelCase :List[Any]="</s>" ,_UpperCamelCase :List[Any]="</s>" ,_UpperCamelCase :Optional[Any]="<s>" ,_UpperCamelCase :List[Any]="<unk>" ,_UpperCamelCase :Optional[int]="<pad>" ,_UpperCamelCase :Optional[Dict[str, Any]] = None ,**_UpperCamelCase :List[Any] ,):
snake_case_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case_ : Union[str, Any] = 7
snake_case_ : Any = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
snake_case_ : List[Any] = kwargs.get("""additional_special_tokens""" ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_UpperCamelCase ,)
snake_case_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
snake_case_ : Any = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ : Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ : Optional[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
snake_case_ : Dict = len(self.sp_model )
snake_case_ : List[Any] = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_UpperCamelCase )
snake_case_ : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :str ):
snake_case_ : Optional[int] = self.__dict__.copy()
snake_case_ : int = None
snake_case_ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self :List[str] ,_UpperCamelCase :int ):
snake_case_ : int = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
snake_case_ : List[str] = {}
snake_case_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__ ( self :Optional[int] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case_ : List[str] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def a__ ( self :Dict ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ,_UpperCamelCase :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase ,token_ids_a=_UpperCamelCase ,already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase ))
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase ))
def a__ ( self :List[str] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ):
snake_case_ : Tuple = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def a__ ( self :Any ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def a__ ( self :Dict ):
snake_case_ : List[Any] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ ( self :int ,_UpperCamelCase :str ):
return self.sp_model.encode(_UpperCamelCase ,out_type=_UpperCamelCase )
def a__ ( self :Dict ,_UpperCamelCase :Any ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ : Tuple = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__ ( self :Optional[int] ,_UpperCamelCase :int ):
snake_case_ : Dict = """""".join(_UpperCamelCase ).replace(_UpperCamelCase ,""" """ ).strip()
return out_string
def a__ ( self :Optional[int] ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : Any = os.path.join(
_UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase ,"""wb""" ) as fi:
snake_case_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
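# Hedged usage sketch, assuming the class above corresponds to transformers'
# XGLMTokenizer; facebook/xglm-564M is the only checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP.
#
#     tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tok("Hello world").input_ids  # prefixed with the </s> separator (id 2)
#     tok.save_vocabulary("./xglm-vocab")  # writes sentencepiece.bpe.model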
| 8 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__A : Optional[int] = logging.get_logger(__name__)
class MobileViTFeatureExtractor ( MobileViTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
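# Hedged sketch: instantiating the deprecated alias still works but emits the
# FutureWarning above; new code should use MobileViTImageProcessor directly.
#
#     extractor = MobileViTFeatureExtractor()  # warns, then behaves like the image processor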
| 8 | 1 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__lowerCAmelCase : List[str] =None
try:
import msvcrt
except ImportError:
__lowerCAmelCase : List[Any] =None
try:
import fcntl
except ImportError:
__lowerCAmelCase : str =None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__lowerCAmelCase : int =OSError
# Data
# ------------------------------------------------
__lowerCAmelCase : List[Any] =[
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__lowerCAmelCase : Dict ='3.0.12'
__lowerCAmelCase : Dict =None
def _UpperCamelCase ( ):
global _logger
__SCREAMING_SNAKE_CASE : Dict = _logger or logging.getLogger(__name__ )
return _logger
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :str , lowerCAmelCase__ :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = lock_file
return None
def __str__( self :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[str] = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class _lowercase :
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = lock
return None
def __enter__( self :List[str] ) -> int:
return self.lock
def __exit__( self :Union[str, Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> str:
self.lock.release()
return None
class _lowercase :
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Any=-1 , lowerCAmelCase__ :List[Any]=None ) -> Dict:
__SCREAMING_SNAKE_CASE : List[Any] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__SCREAMING_SNAKE_CASE : Optional[int] = self.hash_filename_if_too_long(lowerCAmelCase__ , lowerCAmelCase__ )
# The path to the lock file.
__SCREAMING_SNAKE_CASE : Tuple = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__SCREAMING_SNAKE_CASE : Any = None
# The default timeout value.
__SCREAMING_SNAKE_CASE : Union[str, Any] = timeout
# We use this lock primarily for the lock counter.
__SCREAMING_SNAKE_CASE : Any = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__SCREAMING_SNAKE_CASE : Any = 0
return None
@property
def __magic_name__( self :Union[str, Any] ) -> Dict:
return self._lock_file
@property
def __magic_name__( self :Union[str, Any] ) -> int:
return self._timeout
@timeout.setter
def __magic_name__( self :Any , lowerCAmelCase__ :str ) -> Any:
__SCREAMING_SNAKE_CASE : int = float(lowerCAmelCase__ )
return None
def __magic_name__( self :List[Any] ) -> List[str]:
raise NotImplementedError()
def __magic_name__( self :int ) -> int:
raise NotImplementedError()
@property
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
return self._lock_file_fd is not None
def __magic_name__( self :str , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :int=0.05 ) -> int:
# Use the default timeout, if no timeout is provided.
if timeout is None:
__SCREAMING_SNAKE_CASE : Tuple = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__SCREAMING_SNAKE_CASE : Tuple = id(self )
__SCREAMING_SNAKE_CASE : Dict = self._lock_file
__SCREAMING_SNAKE_CASE : int = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(lowerCAmelCase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__SCREAMING_SNAKE_CASE : Union[str, Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __magic_name__( self :Dict , lowerCAmelCase__ :Any=False ) -> Tuple:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__SCREAMING_SNAKE_CASE : Any = id(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file
logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
__SCREAMING_SNAKE_CASE : Any = 0
logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self :Tuple ) -> List[Any]:
self.acquire()
return self
def __exit__( self :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] ) -> int:
self.release()
return None
def __del__( self :Union[str, Any] ) -> Any:
self.release(force=lowerCAmelCase__ )
return None
def __magic_name__( self :int , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : str = os.path.basename(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > max_length and max_length > 0:
__SCREAMING_SNAKE_CASE : str = os.path.dirname(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = str(hash(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : List[str] = filename[: max_length - len(lowerCAmelCase__ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
else:
return path
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :Optional[int]=None ) -> List[Any]:
from .file_utils import relative_to_absolute_path
super().__init__(lowerCAmelCase__ , timeout=lowerCAmelCase__ , max_filename_length=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def __magic_name__( self :Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE : Dict = os.open(self._lock_file , lowerCAmelCase__ )
except OSError:
pass
else:
try:
msvcrt.locking(lowerCAmelCase__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : List[str] = fd
return None
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self._lock_file_fd
__SCREAMING_SNAKE_CASE : int = None
msvcrt.locking(lowerCAmelCase__ , msvcrt.LK_UNLCK , 1 )
os.close(lowerCAmelCase__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str]=-1 , lowerCAmelCase__ :Optional[Any]=None ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(lowerCAmelCase__ ) ).f_namemax
super().__init__(lowerCAmelCase__ , timeout=lowerCAmelCase__ , max_filename_length=lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.open(self._lock_file , lowerCAmelCase__ )
try:
fcntl.flock(lowerCAmelCase__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : str = fd
return None
def __magic_name__( self :Tuple ) -> Optional[int]:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
__SCREAMING_SNAKE_CASE : int = self._lock_file_fd
__SCREAMING_SNAKE_CASE : List[str] = None
fcntl.flock(lowerCAmelCase__ , fcntl.LOCK_UN )
os.close(lowerCAmelCase__ )
return None
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , lowerCAmelCase__ )
except OSError:
pass
else:
__SCREAMING_SNAKE_CASE : int = fd
return None
def __magic_name__( self :List[Any] ) -> Optional[int]:
os.close(self._lock_file_fd )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__lowerCAmelCase : Optional[int] =None
if msvcrt:
__lowerCAmelCase : str =WindowsFileLock
elif fcntl:
__lowerCAmelCase : Union[str, Any] =UnixFileLock
else:
__lowerCAmelCase : Union[str, Any] =SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
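# Hedged usage sketch for the module above. The final alias assigned from
# WindowsFileLock/UnixFileLock/SoftFileLock plays the role of ``FileLock``;
# the lock path and timeout below are illustrative.
#
#     lock = FileLock("/tmp/my_resource.lock", timeout=10)
#     with lock:
#         ...  # critical section; Timeout is raised if acquisition takes >10s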
| 9 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent ):
        self.parent = parent
    def prepare_feat_extract_dict( self ):
        return {}
def get_html_strings ():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest ( FeatureExtractionSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
    def setUp( self ):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )
    @property
    def feat_extract_dict( self ):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string )
        # fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings )
        # fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
| 9 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ = 1_3 , lowercase_ = 6_4 , lowercase_ = 2 , lowercase_ = 3 , lowercase_ = 3 , lowercase_ = True , lowercase_ = True , lowercase_ = 1_2_8 , lowercase_=[1_6, 3_2, 6_4, 1_2_8] , lowercase_ = 7 , lowercase_ = 4 , lowercase_ = 3_7 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 1_0 , lowercase_ = 0.02 , lowercase_ = 2 , lowercase_ = 1 , lowercase_ = 1_2_8 , lowercase_ = [2, 2, 2, 2] , lowercase_ = 2 , lowercase_ = 2 , ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = encoder_stride
lowerCAmelCase_ = num_attention_outputs
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = embed_dim + 1
lowerCAmelCase_ = resolution
lowerCAmelCase_ = depths
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = dim
lowerCAmelCase_ = mlp_expansion_ratio
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ) -> str:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = TFEfficientFormerModel(config=lowercase_ )
lowerCAmelCase_ = model(lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = self.type_sequence_label_size
lowerCAmelCase_ = TFEfficientFormerForImageClassification(lowercase_ )
lowerCAmelCase_ = model(lowercase_ , labels=lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase_ = 1
lowerCAmelCase_ = TFEfficientFormerForImageClassification(lowercase_ )
lowerCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__a: int = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__a: Dict = False
__a: Union[str, Any] = False
__a: Dict = False
__a: List[Any] = False
__a: List[str] = False
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = TFEfficientFormerModelTester(self )
lowerCAmelCase_ = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7 )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(lowercase_ )
lowerCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def _lowercase ( self ) -> int:
'''simple docstring'''
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ):
lowerCAmelCase_ = model_class(lowercase_ )
lowerCAmelCase_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
lowerCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase_ ) , lowercase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
lowerCAmelCase_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
lowerCAmelCase_ = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCAmelCase_ = outputs.decoder_hidden_states
self.asseretIsInstance(lowercase_ , (list, tuple) )
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowerCAmelCase_ = getattr(self.model_tester , 'seq_length' , lowercase_ )
lowerCAmelCase_ = getattr(self.model_tester , 'decoder_seq_length' , lowercase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = TFEfficientFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
lowerCAmelCase_ = getattr(self.model_tester , 'seq_length' , lowercase_ )
lowerCAmelCase_ = getattr(self.model_tester , 'encoder_seq_length' , lowercase_ )
lowerCAmelCase_ = getattr(self.model_tester , 'key_length' , lowercase_ )
lowerCAmelCase_ = getattr(self.model_tester , 'chunk_length' , lowercase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
lowerCAmelCase_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(lowercase_ )
lowerCAmelCase_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
lowerCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(lowercase_ )
lowerCAmelCase_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
lowerCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase_ = model_class(lowercase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowercase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase_ = model(lowercase_ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase ( ) -> int:
lowerCAmelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
lowerCAmelCase_ = model(**lowercase_ , training=lowercase_ )
# verify the logits
lowerCAmelCase_ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCAmelCase_ = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
@slow
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
lowerCAmelCase_ = model(**lowercase_ , training=lowercase_ )
# verify the logits
lowerCAmelCase_ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCAmelCase_ = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
| 14 |
def check_cycle ( graph: dict ) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search ( graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
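    # Hedged example: 0 -> 1 -> 2 -> 0 closes a cycle, so this prints True;
    # dropping the 2 -> 0 edge would print False.
    print(check_cycle({0: [1], 1: [2], 2: [0], 3: []}))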
| 14 | 1 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
_lowercase_uppercase_re = re.compile(R'''([a-z\d])([A-Z])''')
_single_underscore_re = re.compile(R'''(?<!_)_(?!_)''')
_multiple_underscores_re = re.compile(R'''(_{2,})''')
_split_re = R'''^\w+(\.\w+)*$'''
INVALID_WINDOWS_CHARACTERS_IN_PATH = R'''<>:/\|?*'''
def camelcase_to_snakecase(name ):
    """simple docstring"""
    name = _uppercase_uppercase_re.sub(R'''\1_\2''' , name )
    name = _lowercase_uppercase_re.sub(R'''\1_\2''' , name )
    return name.lower()
def snakecase_to_camelcase(name ):
    """simple docstring"""
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '''''' )
def filename_prefix_for_name(name ):
    """simple docstring"""
    if os.path.basename(name ) != name:
        raise ValueError(f'Should be a dataset name, not a path: {name}' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split(name , split ):
    """simple docstring"""
    if os.path.basename(name ) != name:
        raise ValueError(f'Should be a dataset name, not a path: {name}' )
    if not re.match(_split_re , split ):
        raise ValueError(f'Split name should match \'{_split_re}\' but got \'{split}\'.' )
    return f'{filename_prefix_for_name(name )}-{split}'
def filepattern_for_dataset_split(dataset_name , split , data_dir , filetype_suffix=None ):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f'.{filetype_suffix}'
    filepath = os.path.join(data_dir , prefix )
    return f'{filepath}*'
def filenames_for_dataset_split(path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f'.{filetype_suffix}' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'.{filetype_suffix}'
        return [filename]
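# A quick illustration of the helpers above; the expected values in the comments
# are assumptions of this edit, derived from the regexes at the top of the file.
if __name__ == "__main__":
    print(camelcase_to_snakecase("SquadV2" ) )  # -> squad_v2
    print(snakecase_to_camelcase("squad_v2" ) )  # -> SquadV2
    print(filenames_for_dataset_split("/data" , "squad" , "train" , "arrow" , [100, 100] ) )
    # -> ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']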
| 131 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ChineseCLIPImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs )-> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs )-> BatchEncoding:
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs )-> Any:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs )-> int:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self )-> Union[str, Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self )-> List[str]:
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
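# A minimal usage sketch (the checkpoint id and the sample inputs below are
# assumptions of this edit, not taken from this file):
#
#   from PIL import Image
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16')
#   image = Image.open('cat.png')
#   batch = processor(text=['一张猫的照片'], images=image, return_tensors='pt')
#   # batch combines input_ids/attention_mask from the tokenizer with pixel_values
#   # from the image processor, ready for ChineseCLIPModel.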
| 131 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned ):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
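# Example invocation (the paths below are placeholders chosen for this edit, not
# real files):
#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./sew-tiny-100k.pt \
#       --pytorch_dump_folder_path ./sew-tiny-100k
# Pass --is_finetuned together with --dict_path when converting a CTC fine-tuned
# checkpoint, so a vocabulary and a Wav2Vec2Processor are written as well.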
| 42 |
import os
def solution( filename: str = "input.txt" ) -> int:
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split("," )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        # seed each cell from its left neighbour
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # relax downwards
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        # relax upwards
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
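# The column sweep above implements the Project Euler 82 recurrence
#   minimal_path_sums[i][j] = matrix[i][j]
#       + min(left neighbour, upper neighbour, lower neighbour)
# in three passes per column: seed from the left, relax downwards, then relax
# upwards, so both vertical movement directions are accounted for.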
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42 | 1 |
import random
from typing import Any
def fisher_yates_shuffle( data: list ) -> Any:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
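# Note: the loop above swaps two uniformly random positions per pass, which is not
# the unbiased textbook Fisher-Yates. A minimal sketch of the classic variant,
# added by this edit for comparison (not part of the original file):
def fisher_yates_shuffle_unbiased( data: list ) -> list:
    for i in range(len(data ) - 1 , 0 , -1 ):
        # pick j uniformly from the not-yet-fixed prefix [0, i], then fix position i
        j = random.randint(0 , i )
        data[i], data[j] = data[j], data[i]
    return data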
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 216 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count( message: str ) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x: tuple ) -> str:
    return x[0]
def get_frequency_order( message: str ) -> str:
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message: str ) -> int:
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
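# A quick demonstration of the helpers above. The sample sentence is an assumption
# of this edit; a longer English text tends to score close to the maximum of 12
# (six common plus six uncommon letters matched against ETAOIN).
def _demo() -> None:
    sample = "The quick brown fox jumps over the lazy dog and then runs away quickly"
    print(get_frequency_order(sample ) )
    print(english_freq_match_score(sample ) )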
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key , offset , original_name , new_name ) -> str:
    """simple docstring"""
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' , f'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
def rename_keys(state_dict ) -> OrderedDict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace , f'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''' , '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img() -> Image.Image:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 1_000
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1_000)
    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'''Size {size} not supported''' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info(f'''Converting model {model_name}...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
    else:
        raise ValueError(f'''Size {size} not supported''' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
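# Example invocation (the paths below are placeholders chosen for this edit, not
# real files):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer-s12
# The last three characters of --model_name select the size branch (s12 ... m48)
# in convert_poolformer_checkpoint above.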
| 279 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D( nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self ) -> None:
        '''simple docstring'''
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__(self , hidden_states , temb , encoder_hidden_states , deterministic=True ) -> Tuple:
        '''simple docstring'''
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D( nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self ) -> None:
        '''simple docstring'''
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__(self , hidden_states , temb , deterministic=True ) -> Tuple:
        '''simple docstring'''
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D( nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self ) -> None:
        '''simple docstring'''
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__(self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ) -> Union[str, Any]:
        '''simple docstring'''
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D( nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self ) -> None:
        '''simple docstring'''
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__(self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ) -> List[Any]:
        '''simple docstring'''
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn( nn.Module ):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self ) -> None:
        '''simple docstring'''
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__(self , hidden_states , temb , encoder_hidden_states , deterministic=True ) -> Optional[int]:
        '''simple docstring'''
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
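# A minimal shape-check sketch for one of the blocks above (all sizes and the NHWC
# sample layout below are assumptions of this edit):
#
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
#   sample = jnp.zeros((1, 64, 64, 32))   # (batch, height, width, channels)
#   temb = jnp.zeros((1, 128))            # time embedding
#   params = block.init(jax.random.PRNGKey(0), sample, temb)["params"]
#   hidden_states, output_states = block.apply({"params": params}, sample, temb)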
| 279 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/xglm-564M''': 20_48,
}
class XGLMTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) ->None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size = len(self.sp_model )
        madeup_words = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) ->Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) ->None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) ->List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) ->List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) ->List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
    @property
    def vocab_size( self ) ->int:
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ) ->Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) ->List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) ->int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) ->str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) ->str:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) ->Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
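# A minimal usage sketch (the checkpoint id comes from the pretrained map above;
# the sample text is an assumption of this edit):
#
#   tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
#   ids = tokenizer('Hello world')['input_ids']
#   # build_inputs_with_special_tokens prepends </s> (the sep token), so ids[0] == 2
#   text = tokenizer.decode(ids)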
| 8 |
import math
def res(x , y ):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('''This should never happen''' )
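# Worked example: res(3, 4) = 4 * log10(3) ~ 1.908 while res(2, 7) = 7 * log10(2)
# ~ 2.107, so 2 ** 7 = 128 beats 3 ** 4 = 81 -- comparing logarithms avoids ever
# computing the powers themselves.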
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    x1, y1 = map(int, input(prompt).split(''','''))
    x2, y2 = map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print('''Largest number is''', x1, '''^''', y1)
    elif res2 > res1:
        print('''Largest number is''', x2, '''^''', y2)
    else:
        print('''Both are equal''')
| 8 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
    '''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a=0.2 , _a=0.2 ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Tuple = bp_numa
SCREAMING_SNAKE_CASE__ : int = bp_numa
SCREAMING_SNAKE_CASE__ : int = conva_get[:2]
SCREAMING_SNAKE_CASE__ : int = conva_get[2]
SCREAMING_SNAKE_CASE__ : List[str] = size_pa
SCREAMING_SNAKE_CASE__ : Optional[Any] = rate_w
SCREAMING_SNAKE_CASE__ : Dict = rate_t
SCREAMING_SNAKE_CASE__ : Tuple = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : int = -2 * np.random.rand(self.num_bpa ) + 1
    def save_model(self , save_path ) -> None:
        """simple docstring"""
        model_dic = {
            """num_bp1""": self.num_bp1,
            """num_bp2""": self.num_bp2,
            """num_bp3""": self.num_bp3,
            """conv1""": self.conv1,
            """step_conv1""": self.step_conv1,
            """size_pooling1""": self.size_pooling1,
            """rate_weight""": self.rate_weight,
            """rate_thre""": self.rate_thre,
            """w_conv1""": self.w_conv1,
            """wkj""": self.wkj,
            """vji""": self.vji,
            """thre_conv1""": self.thre_conv1,
            """thre_bp2""": self.thre_bp2,
            """thre_bp3""": self.thre_bp3,
        }
        with open(save_path , """wb""" ) as f:
            pickle.dump(model_dic , f )
        print(f'''Model saved: {save_path}''' )
    @classmethod
    def read_model(cls , model_path ) -> "CNN":
        """simple docstring"""
        with open(model_path , """rb""" ) as f:
            model_dic = pickle.load(f )  # noqa: S301
        conv_get = model_dic.get("""conv1""" )
        conv_get.append(model_dic.get("""step_conv1""" ) )
        size_p1 = model_dic.get("""size_pooling1""" )
        bp1 = model_dic.get("""num_bp1""" )
        bp2 = model_dic.get("""num_bp2""" )
        bp3 = model_dic.get("""num_bp3""" )
        rate_w = model_dic.get("""rate_weight""" )
        rate_t = model_dic.get("""rate_thre""" )
        # create model instance
        conv_ins = CNN(conv_get , size_p1 , bp1 , bp2 , bp3 , rate_w , rate_t )
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("""w_conv1""" )
        conv_ins.wkj = model_dic.get("""wkj""" )
        conv_ins.vji = model_dic.get("""vji""" )
        conv_ins.thre_conv1 = model_dic.get("""thre_conv1""" )
        conv_ins.thre_bp2 = model_dic.get("""thre_bp2""" )
        conv_ins.thre_bp3 = model_dic.get("""thre_bp3""" )
        return conv_ins
    def sig(self , x ) -> float:
        """simple docstring"""
        return 1 / (1 + np.exp(-1 * x ))
    def do_round(self , x ) -> float:
        """simple docstring"""
        return round(x , 3 )
    def convolute(self , data , convs , w_convs , thre_convs , conv_step ) -> Union[str, Any]:
        """simple docstring"""
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data )[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0 , size_data - size_conv + 1 , conv_step ):
            for j_focus in range(0 , size_data - size_conv + 1 , conv_step ):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus )
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(num_conv ):
            featuremap = []
            for i_focus in range(len(data_focus ) ):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus ) )
            featuremap = np.asmatrix(featuremap ).reshape(
                size_feature_map , size_feature_map )
            data_featuremap.append(featuremap )
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus ) )
        focus_list = np.asarray(focus1_list )
        return focus_list, data_featuremap
    def pooling(self , featuremaps , size_pooling , pooling_type="average_pool" ) -> List[Any]:
        """simple docstring"""
        size_map = len(featuremaps[0] )
        size_pooled = int(size_map / size_pooling )
        featuremap_pooled = []
        for i_map in range(len(featuremaps ) ):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0 , size_map , size_pooling ):
                for j_focus in range(0 , size_map , size_pooling ):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus ) )
            map_pooled = np.asmatrix(map_pooled ).reshape(size_pooled , size_pooled )
            featuremap_pooled.append(map_pooled )
        return featuremap_pooled
    def _expand(self , data ) -> Union[str, Any]:
        """simple docstring"""
        data_expanded = []
        for i in range(len(data ) ):
            shapes = np.shape(data[i] )
            data_listed = data[i].reshape(1 , shapes[0] * shapes[1] )
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed )
        data_expanded = np.asarray(data_expanded )
        return data_expanded
    def _expand_mat(self , data_mat ) -> int:
        """simple docstring"""
        data_mat = np.asarray(data_mat )
        shapes = np.shape(data_mat )
        data_expanded = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded
    def _calculate_gradient_from_pool(self , out_map , pd_pool , num_map , size_map , size_pooling ) -> List[Any]:
        """simple docstring"""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map ):
            pd_conv1 = np.ones((size_map, size_map) )
            for i in range(0 , size_map , size_pooling ):
                for j in range(0 , size_map , size_pooling ):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1 , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(pd_conv2 )
        return pd_all
    def train(self , patterns , datas_train , datas_teach , n_repeat , error_accuracy , draw_e=bool ) -> float:
        """simple docstring"""
        print("""----------------------Start Training-------------------------""" )
        print((""" - - Shape: Train_Data """, np.shape(datas_train )) )
        print((""" - - Shape: Teach_Data """, np.shape(datas_teach )) )
        rp = 0
        all_mse = []
        mse = 10_000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f'''-------------Learning Time {rp}--------------''' )
            for p in range(len(datas_train ) ):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p] )
                data_teach = np.asarray(datas_teach[p] )
                data_focus1, data_conved1 = self.convolute(
                    data_train , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
                data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
                shape_featuremap1 = np.shape(data_conved1 )
                data_bp_input = self._expand(data_pooled1 )
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1 , self.vji.T ) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j )
                bp_net_k = np.dot(bp_out2 , self.wkj.T ) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3) , np.multiply(bp_out3 , (1 - bp_out3) ) )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all , self.wkj ) , np.multiply(bp_out2 , (1 - bp_out2) ) )
                pd_i_all = np.dot(pd_j_all , self.vji )
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1 , pd_conv1_pooled , shape_featuremap1[0] , shape_featuremap1[1] , self.size_pooling1 , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1] ):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv] )
                    delta_w = self.rate_weight * np.dot(pd_conv_list , data_focus1 )
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]) )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3 ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse )
        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(all_mse , """+-""" )
            plt.plot(yplot , """r--""" )
            plt.xlabel("""Learning Times""" )
            plt.ylabel("""All_mse""" )
            plt.grid(True , alpha=0.5 )
            plt.show()
        print("""------------------Training Complished---------------------""" )
        print((""" - - Training epoch: """, rp, f''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse
    def predict(self , datas_test ) -> List[str]:
        """simple docstring"""
        produce_out = []
        print("""-------------------Start Testing-------------------------""" )
        print((""" - - Shape: Test_Data """, np.shape(datas_test )) )
        for p in range(len(datas_test ) ):
            data_test = np.asmatrix(datas_test[p] )
            data_focus1, data_conved1 = self.convolute(
                data_test , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
            data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
            data_bp_input = self._expand(data_pooled1 )
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j )
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k )
            produce_out.extend(bp_out3.getA().tolist() )
        res = [list(map(self.do_round , each ) ) for each in produce_out]
        return np.asarray(res )
    def convolution(self , data ) -> Tuple:
        """simple docstring"""
        data_test = np.asmatrix(data )
        data_focus1, data_conved1 = self.convolute(
            data_test , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
        data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
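    # A usage sketch (all shapes and hyper-parameters below are assumptions of this
    # edit, not values from the original file):
    #   cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=8, bp_num2=6, bp_num3=2)
    #   mse = cnn.train(patterns=len(train_x), datas_train=train_x,
    #                   datas_teach=train_y, n_repeat=50, error_accuracy=0.1,
    #                   draw_e=False)
    #   preds = cnn.predict(test_x)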
| 56 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]] ) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images(cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 56 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size: int = 13 , image_size: int = 64 , patch_size: int = 2 , num_channels: int = 3 , dim: int = 3 , is_training: bool = True , use_labels: bool = True , hidden_size: int = 128 , hidden_sizes=[16, 32, 64, 128] , num_hidden_layers: int = 7 , num_attention_heads: int = 4 , intermediate_size: int = 37 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , type_sequence_label_size: int = 10 , initializer_range: float = 0.02 , encoder_stride: int = 2 , num_attention_outputs: int = 1 , embed_dim: int = 128 , depths: List[int] = [2, 2, 2, 2] , resolution: int = 2 , mlp_expansion_ratio: int = 2 , ) ->None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict:
'''simple docstring'''
A__ = TFEfficientFormerModel(config=UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
A__ = 1
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
A__ = TFEfficientFormerModelTester(self)
A__ = ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict):
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
if hasattr(self.model_tester , '''encoder_seq_length'''):
A__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
A__ = seq_length * self.model_tester.chunk_length
else:
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ = outputs.decoder_hidden_states
                self.assertIsInstance(UpperCAmelCase__ , (list, tuple))
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int:
'''simple docstring'''
A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
A__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A__ = model_class(UpperCAmelCase__)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A__ = model(UpperCAmelCase__)
self.assertTrue(outputs_dict is not None)
def SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
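# Illustrative inference sketch mirroring the slow test above (checkpoint name is
# taken from the test; exact logit values depend on the released weights):
#   processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#   model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#   logits = model(**processor(images=image, return_tensors="tf")).logits  # shape (1, 1000)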
| 14 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha) -> StableDiffusionPipeline:
    """Merge a LoRA .safetensors checkpoint into a diffusers StableDiffusionPipeline."""
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet
# find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
# update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
return pipeline
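# The update applied above is the standard LoRA merge, W <- W + alpha * (up @ down).
# A minimal, self-contained sketch of that rank-r update (shapes below are
# illustrative, not taken from any particular checkpoint):
def merge_lora_delta(weight, weight_up, weight_down, alpha):
    """Return weight + alpha * weight_up @ weight_down (rank r = weight_up.shape[1])."""
    return weight + alpha * torch.mm(weight_up, weight_down)
# e.g. merge_lora_delta(torch.zeros(768, 768), torch.randn(768, 4), torch.randn(4, 768), 0.75)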
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password of length i that is guaranteed to contain the characters in chars_incl."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))
def random_number(chars_incl, quantity):
    pass  # Put your code here...
def random_letters(chars_incl, quantity):
    pass  # Put your code here...
def random_characters(chars_incl, quantity):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check that a password is at least min_length long and mixes all four character classes."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
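# Examples (illustrative): is_strong_password("Aa1!aaaa") -> True (8 chars with all
# four classes present); is_strong_password("weakpwd") -> False (too short, and no
# uppercase letter, digit or special character).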
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main():
    max_length = int(input("""Please indicate the max length of your password: """ ).strip() )
    chars_incl = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(max_length ) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(chars_incl , max_length ) , )
    print("""[If you are thinking of using this password, you had better save it.]""" )
if __name__ == "__main__":
main()
| 359 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
A_ = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Tuple = """https://pypi.org/pypi/diffusers/json"""
_snake_case : Optional[int] = json.loads(request.urlopen(snake_case__ ).read() )["""releases"""].keys()
return sorted(snake_case__ , key=lambda snake_case__ : version.Version(snake_case__ ) )
def UpperCAmelCase__ ():
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(snake_case__ )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
_snake_case : str = Path(snake_case__ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase__ (snake_case__ : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
_snake_case : List[Any] = Path(snake_case__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
_snake_case : List[Any] = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def get_relative_imports(module_file ):
    """Return the modules that a python file imports via relative imports."""
    with open(module_file , """r""" , encoding="""utf-8""" ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"""^\s*import\s+\.(\S+)\s*$""" , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""" , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
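# e.g. a module containing "from .unet import UNet" and "from .utils import logging"
# yields ["unet", "utils"] here (de-duplicated via set, so order is not guaranteed).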
def UpperCAmelCase__ (snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Tuple = False
_snake_case : Any = [module_file]
_snake_case : str = []
# Let's recurse through all relative imports
while not no_change:
_snake_case : Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(snake_case__ ) )
_snake_case : Dict = Path(snake_case__ ).parent
_snake_case : Dict = [str(module_path / m ) for m in new_imports]
_snake_case : Tuple = [f for f in new_import_files if f not in all_relative_imports]
_snake_case : str = [F"{f}.py" for f in new_import_files]
_snake_case : Dict = len(snake_case__ ) == 0
all_relative_imports.extend(snake_case__ )
return all_relative_imports
def check_imports(filename ):
    """Check that a python file's top-level imports are installed; return its relative imports."""
    with open(filename , """r""" , encoding="""utf-8""" ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"""^\s*import\s+(\S+)\s*$""" , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"""^\s*from\s+(\S+)\s+import""" , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            """This modeling file requires the following packages that were not found in your environment: """
            F"{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`" )
    return get_relative_imports(filename )
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = module_path.replace(os.path.sep , """.""" )
_snake_case : int = importlib.import_module(snake_case__ )
if class_name is None:
return find_pipeline_class(snake_case__ )
return getattr(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
from ..pipelines import DiffusionPipeline
_snake_case : Tuple = dict(inspect.getmembers(snake_case__ , inspect.isclass ) )
_snake_case : Dict = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , snake_case__ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
F" {loaded_module}." )
_snake_case : List[str] = cls
return pipeline_class
def UpperCAmelCase__ (snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , ):
"""simple docstring"""
_snake_case : List[str] = str(snake_case__ )
_snake_case : Optional[Any] = os.path.join(snake_case__ , snake_case__ )
if os.path.isfile(snake_case__ ):
_snake_case : List[str] = module_file_or_url
_snake_case : Optional[Any] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
_snake_case : Tuple = get_diffusers_versions()
# cut ".dev0"
_snake_case : Union[str, Any] = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
_snake_case : int = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
_snake_case : Optional[Any] = F"v{revision}"
elif revision == "main":
_snake_case : int = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
_snake_case : List[str] = COMMUNITY_PIPELINES_URL.format(revision=snake_case__ , pipeline=snake_case__ )
try:
_snake_case : Dict = cached_download(
snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , )
_snake_case : Union[str, Any] = """git"""
_snake_case : str = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
_snake_case : str = hf_hub_download(
snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , )
_snake_case : Optional[Any] = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
_snake_case : int = check_imports(snake_case__ )
# Now we move the module inside our cached dynamic modules.
_snake_case : Optional[int] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(snake_case__ )
_snake_case : Any = Path(snake_case__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(snake_case__ , submodule_path / module_file )
for module_needed in modules_needed:
_snake_case : Any = F"{module_needed}.py"
shutil.copy(os.path.join(snake_case__ , snake_case__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(snake_case__ , snake_case__ ):
_snake_case : Any = use_auth_token
elif use_auth_token is True:
_snake_case : int = HfFolder.get_token()
else:
_snake_case : Optional[int] = None
_snake_case : int = model_info(snake_case__ , revision=snake_case__ , token=snake_case__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_snake_case : int = submodule_path / commit_hash
_snake_case : Optional[int] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(snake_case__ )
if not (submodule_path / module_file).exists():
shutil.copy(snake_case__ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
snake_case__ , F"{module_needed}.py" , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , )
return os.path.join(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[str] = None , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , **snake_case__ : Tuple , ):
"""simple docstring"""
_snake_case : Union[str, Any] = get_cached_module_file(
snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , )
return get_class_in_module(snake_case__ , final_module.replace(""".py""" , """""" ) )
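# Illustrative call of the loader defined above (diffusers' get_class_from_dynamic_module);
# the repo id, file name and class name below are hypothetical:
#   pipeline_class = get_class_from_dynamic_module(
#       "some-user/custom-pipeline", "pipeline.py", class_name="MyPipeline"
#   )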
| 132 | 0 |
'''simple docstring'''
from __future__ import annotations
class __UpperCAmelCase :
    def __init__( self , rows ):
        """simple docstring"""
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
def lowerCamelCase ( self ):
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return len(self.rows )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return len(self.rows[0] )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return self.order[0] == self.order[1]
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowerCamelCase ( self ):
"""simple docstring"""
return bool(self.determinant() )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCAmelCase_ ).determinant()
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(lowerCAmelCase_ , lowerCAmelCase_ )
return -1 * self.get_minor(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
return Matrix(
[
[self.get_minor(lowerCAmelCase_ , lowerCAmelCase_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowerCamelCase ( self ):
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self ):
"""simple docstring"""
return str(self.rows )
def __str__( self ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                '[' + '. '.join([str(value ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise type_error
for value in row:
if not isinstance(lowerCAmelCase_ , (int, float) ):
raise type_error
if len(lowerCAmelCase_ ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(lowerCAmelCase_ )
else:
_snake_case = self.rows[0:position] + [row] + self.rows[position:]
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise type_error
for value in column:
if not isinstance(lowerCAmelCase_ , (int, float) ):
raise type_error
if len(lowerCAmelCase_ ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
_snake_case = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
_snake_case = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , lowerCAmelCase_ ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , lowerCAmelCase_ ):
"""simple docstring"""
return not self == other
def __neg__( self ):
"""simple docstring"""
return self * -1
def __add__( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(lowerCAmelCase_ , lowerCAmelCase_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self , lowerCAmelCase_ ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
_snake_case = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls , row , column ):
        """simple docstring"""
        return sum(row[i] * column[i] for i in range(len(row ) ) )
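# Illustrative usage (assuming the obfuscated assignment targets above are restored
# to the names used at their call sites, e.g. `self.rows`):
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()        # -2
#   (m + m).rows           # [[2, 4], [6, 8]]
#   (m * m).rows           # [[7, 10], [15, 22]]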
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        """Forward pass of google/mt5-small checked against a reference log-likelihood score."""
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
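        # Note: `loss` is the cross-entropy averaged over the target tokens, so
        # labels.shape[-1] * loss is the summed negative log-likelihood; negating it
        # gives the log-likelihood score used by the original Mesh-TensorFlow evaluation.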
| 42 | 1 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
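# Example: solution(3) == 12, since 144 = F(12) is the first Fibonacci term with 3 digits.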
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 52 |
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the candidate plaintext for every key."""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main() -> None:
    """Read a ciphertext from stdin and print all twenty-six candidate decryptions."""
    message = input('''Encrypted message: ''' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 52 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = parent
snake_case_ : Tuple = batch_size
snake_case_ : List[str] = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[Any] = use_attention_mask
snake_case_ : Optional[int] = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : Any = vocab_size
snake_case_ : str = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[str] = num_attention_heads
snake_case_ : str = intermediate_size
snake_case_ : List[str] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : Any = type_vocab_size
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : Union[str, Any] = num_choices
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : str = None
if self.use_attention_mask:
snake_case_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Dict = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__magic_name__ , )
return config, input_ids, attention_mask
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
snake_case_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[Any] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = FlaxDistilBertModelTester(self )
@slow
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Any = model_class_name.from_pretrained('''distilbert-base-uncased''' )
snake_case_ : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
snake_case_ : Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
snake_case_ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ )[0]
snake_case_ : Optional[int] = (1, 11, 768)
self.assertEqual(output.shape , __magic_name__ )
snake_case_ : Union[str, Any] = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
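# Illustrative shape check mirroring the integration test above: for any input length,
# distilbert-base-uncased returns last hidden states of shape (batch, seq_len, 768).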
| 279 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( _a ):
    model_type = '''megatron-bert'''
    def __init__(self , vocab_size=2_9056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
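# Illustrative (per the transformers docs): MegatronBertConfig() builds the default
# megatron-bert-uncased-345m sized configuration, and MegatronBertModel(config)
# instantiates a randomly initialised model from it.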
| 279 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow's console logging
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 243 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
    'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_text_dual_encoder'] = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_text_dual_encoder'] = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_text_dual_encoder'] = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
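    # _LazyModule defers the heavy torch/flax/tf imports above until an attribute
    # such as VisionTextDualEncoderModel is first accessed, keeping the top-level
    # `import transformers` cheap.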
| 243 | 1 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
    return bool(re.search(pattern , phone ) )
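# Examples the pattern accepts: "0718382399", "+94773283048", "0094702343221";
# rejected: "0912349998" (bad operator prefix), "0712349998212" (too many digits).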
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 56 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a :
@staticmethod
def A_ ( *lowercase_ : int , **lowercase_ : str ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a ( unittest.TestCase ):
snake_case_ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def A_ ( self : Any , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str] ):
snake_case_ = ObjectDetectionPipeline(model=lowercase_ , image_processor=lowercase_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def A_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : int ):
snake_case_ = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(lowercase_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowercase_ , {
'''score''': ANY(lowercase_ ),
'''label''': ANY(lowercase_ ),
'''box''': {'''xmin''': ANY(lowercase_ ), '''ymin''': ANY(lowercase_ ), '''xmax''': ANY(lowercase_ ), '''ymax''': ANY(lowercase_ )},
} , )
import datasets
snake_case_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
snake_case_ = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
snake_case_ = object_detector(lowercase_ , threshold=0.0 )
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for outputs in batch_outputs:
self.assertGreater(len(lowercase_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowercase_ , {
'''score''': ANY(lowercase_ ),
'''label''': ANY(lowercase_ ),
'''box''': {'''xmin''': ANY(lowercase_ ), '''ymin''': ANY(lowercase_ ), '''xmax''': ANY(lowercase_ ), '''ymax''': ANY(lowercase_ )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def A_ ( self : int ):
pass
@require_torch
def A_ ( self : Tuple ):
snake_case_ = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
snake_case_ = AutoModelForObjectDetection.from_pretrained(lowercase_ )
snake_case_ = AutoFeatureExtractor.from_pretrained(lowercase_ )
snake_case_ = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ )
snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] , )
snake_case_ = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] , )
@require_torch
@slow
def A_ ( self : Optional[int] ):
snake_case_ = '''facebook/detr-resnet-50'''
snake_case_ = AutoModelForObjectDetection.from_pretrained(lowercase_ )
snake_case_ = AutoFeatureExtractor.from_pretrained(lowercase_ )
snake_case_ = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ )
snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
snake_case_ = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 56 | 1 |
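For reference, a minimal standalone sketch of how the pipeline exercised by these tests would be called directly; the model id, image URL, and output dict keys are taken from the tests above, while the 0.9 threshold is an illustrative choice:

# Standalone usage of the object-detection pipeline from the tests above.
from transformers import pipeline

object_detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = object_detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    threshold=0.9,  # keep only high-confidence detections
)
for pred in predictions:
    box = pred["box"]
    print(f"{pred['label']}: {pred['score']:.4f} at ({box['xmin']}, {box['ymin']}, {box['xmax']}, {box['ymax']})")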
"""Project Euler problem 10: compute the sum of all primes below two million."""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes above 3 are of the form 6k +/- 1, so only those divisors need testing
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 101 |
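A quick sanity check for the solution above; the small cases can be verified by hand:

# Hand-checkable cases for the prime-summing solution above.
assert solution(10) == 17  # 2 + 3 + 5 + 7
assert solution(2) == 0  # there are no primes below 2
assert sum(takewhile(lambda x: x < 30, prime_generator())) == 129  # primes below 30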
"""Flax implementation of the ControlNet model."""
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            # the stride-2 convolution halves the spatial resolution between levels
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        # zero-initialized so the conditioning embedding starts as a no-op
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        # every residual handed to the UNet passes through a zero-initialized 1x1 conv
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )
            down_blocks.append(down_block)

            # one 1x1 conv per resnet layer in this block, plus one for the downsampler
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process: NCHW -> NHWC, since Flax convolutions expect channels-last
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 101 | 1 |
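A rough usage sketch for the model above (not a tested snippet): the input shapes mirror `init_weights`, and the 77-token, 1280-dim encoder states are illustrative placeholders for a text-encoder output.

# Initialize the ControlNet defined above and run one forward pass.
import jax
import jax.numpy as jnp

controlnet = FlaxControlNetModel()
params = controlnet.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)  # noisy latents, NCHW
timesteps = jnp.array([10], dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)  # text-encoder output
controlnet_cond = jnp.zeros((1, 3, 256, 256), dtype=jnp.float32)  # conditioning image, 8x the latent size

out = controlnet.apply(
    {"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0
)
# out.down_block_res_samples and out.mid_block_res_sample are added to the paired UNet's skip connections.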