"""Image processor class for ImageGPT."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Pairwise squared Euclidean distances between the rows of `a` and the rows of `b`."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """Map every RGB pixel in `x` to the index of its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
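
# Hedged usage sketch for the quantization helpers above. The 3-color palette
# and tiny image are illustrative stand-ins for ImageGPT's real 512-entry
# color palette, not values from this module.
if __name__ == "__main__":
    demo_palette = np.array([[0, 0, 0], [128, 128, 128], [255, 255, 255]])  # hypothetical palette
    demo_image = np.random.randint(0, 256, size=(4, 4, 3))  # H x W x 3 RGB image
    demo_ids = color_quantize(demo_image, demo_palette)  # flattened, shape (H*W,)
    assert demo_ids.shape == (16,) and demo_ids.max() < len(demo_palette)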
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
a_ = True
except (ImportError, AttributeError):
a_ = object
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
pass
a_ = False
a_ = logging.get_logger('transformers-cli/serving')
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Optional[Any] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(__UpperCamelCase , args.host , args.port , args.workers )
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =42
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =42
UpperCamelCase =42
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =42
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =42
class UpperCAmelCase_ ( snake_case ):
@staticmethod
def _lowerCamelCase ( UpperCamelCase_ ) -> Tuple:
__lowercase : Dict = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=UpperCamelCase_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=UpperCamelCase_ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=UpperCamelCase_ , default=88_88 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=UpperCamelCase_ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=UpperCamelCase_ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=UpperCamelCase_ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=UpperCamelCase_ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=UpperCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
__lowercase : List[Any] = pipeline
__lowercase : str = host
__lowercase : List[str] = port
__lowercase : str = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F"""Serving model over {host}:{port}""" )
__lowercase : int = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ),
] , timeout=6_00 , )
def _lowerCamelCase ( self ) -> Union[str, Any]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def _lowerCamelCase ( self ) -> Tuple:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def _lowerCamelCase ( self , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) ) -> Optional[int]:
try:
__lowercase : Any = self._pipeline.tokenizer.tokenize(UpperCamelCase_ )
if return_ids:
__lowercase : Dict = self._pipeline.tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
return ServeTokenizeResult(tokens=UpperCamelCase_ , tokens_ids=UpperCamelCase_ )
else:
return ServeTokenizeResult(tokens=UpperCamelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(UpperCamelCase_ )} )
def _lowerCamelCase ( self , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , ) -> Dict:
try:
__lowercase : Tuple = self._pipeline.tokenizer.decode(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return ServeDeTokenizeResult(model='''''' , text=UpperCamelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(UpperCamelCase_ )} )
async def _lowerCamelCase ( self , UpperCamelCase_=Body(UpperCamelCase_ , embed=UpperCamelCase_ ) ) -> Union[str, Any]:
# Check we don't have empty string
if len(UpperCamelCase_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__lowercase : Optional[Any] = self._pipeline(UpperCamelCase_ )
return ServeForwardResult(output=UpperCamelCase_ )
except Exception as e:
raise HTTPException(5_00 , {'''error''': str(UpperCamelCase_ )} )
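
# Hedged usage sketch: starting the server through the CLI entry point
# registered above and querying the /tokenize route. Host/port are the
# defaults from register_subcommand; the model name is only an example,
# and the JSON body keys mirror the `Body(..., embed=True)` parameters.
#
#   transformers-cli serve --task sentiment-analysis \
#       --model distilbert-base-uncased-finetuned-sst-2-english
#   curl -X POST http://localhost:8888/tokenize \
#       -H "Content-Type: application/json" \
#       -d '{"text_input": "Hello world", "return_ids": true}'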
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = '''ylacombe/bark-small'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = '''en_speaker_1'''
_UpperCamelCase = '''This is a test string'''
_UpperCamelCase = '''speaker_embeddings_path.json'''
_UpperCamelCase = '''speaker_embeddings'''
def UpperCAmelCase ( self , **__a) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCAmelCase)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BarkProcessor(tokenizer=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
_UpperCamelCase = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
_UpperCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_UpperCamelCase = 35
_UpperCamelCase = 2
_UpperCamelCase = 8
_UpperCamelCase = {
'''semantic_prompt''': np.ones(__lowerCAmelCase),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len)),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
_UpperCamelCase = processor(text=self.input_string , voice_preset=__lowerCAmelCase)
_UpperCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([])).tolist())
# test loading voice preset from npz file
_UpperCamelCase = os.path.join(self.tmpdirname , '''file.npz''')
np.savez(__lowerCAmelCase , **__lowerCAmelCase)
_UpperCamelCase = processor(text=self.input_string , voice_preset=__lowerCAmelCase)
_UpperCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([])).tolist())
# test loading voice preset from the hub
_UpperCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BarkProcessor(tokenizer=__lowerCAmelCase)
_UpperCamelCase = processor(text=self.input_string)
_UpperCamelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
def infix_2_postfix(infix):
    """Convert an infix expression to postfix, printing each step as a table."""
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    """Convert an infix expression to prefix by reversing, swapping parentheses, and reusing infix_2_postfix."""
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""Init for the diffusers text-to-video pipelines."""

from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: a list of video frames or a tensor of frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
"""Lazy-import init for the transformers data2vec models."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
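
# Hedged note on the pattern above: `_LazyModule` replaces this package module
# in `sys.modules`, so the heavy submodules are only imported when one of
# their attributes is first accessed. A minimal sketch of the idea (the names
# below are illustrative, not the actual transformers implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, item):
#           module = importlib.import_module("." + self._class_to_module[item], self.__name__)
#           return getattr(module, item)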
"""A 1D UNet model (diffusers `UNet1DModel`)."""

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of `UNet1DModel`: the denoised sample, shape (batch_size, num_channels, sample_size)."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
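
# Hedged usage sketch for the model above: one forward pass with the default
# config (2 input/output channels). Shapes are (batch, channels, length); the
# batch size and timestep value below are illustrative.
#
#   model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   noisy_sample = torch.randn(1, 2, 65536)
#   timestep = torch.tensor([10])
#   denoised = model(noisy_sample, timestep).sample  # same shape as the input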
"""Benchmark `datasets.Dataset.map` and `.filter` over several output formats."""

import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


# NOTE: `map` and `filter` deliberately shadow the builtins; each returns the
# wall-clock duration of the corresponding Dataset method via @get_duration.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
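
# `get_duration` comes from the benchmark suite's local utils module and is
# not shown here. A minimal sketch of what such a timing decorator could look
# like (an assumption, not the actual implementation):
#
#   import functools, time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.time()
#           func(*args, **kwargs)
#           return time.time() - start
#       return wrapper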
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__SCREAMING_SNAKE_CASE : List[str] = '\\n Text data.\n Second line of data.'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'file'
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
snake_case_ = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with zstd.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE ) , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
snake_case_ = input_paths[compression_format]
snake_case_ = tmp_path / """cache"""
snake_case_ = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE )
snake_case_ = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = """custom_cache"""
snake_case_ = """custom_extracted_dir"""
snake_case_ = tmp_path / """custom_extracted_path"""
if default_extracted:
snake_case_ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) )
snake_case_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case_ = xz_file
snake_case_ = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE )
)
snake_case_ = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
# absolute path
snake_case_ = str(Path(_SCREAMING_SNAKE_CASE ).resolve() )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
# relative path
snake_case_ = str(Path(_SCREAMING_SNAKE_CASE ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
# absolute path
snake_case_ = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
# relative path
snake_case_ = """./__missing_file__.txt"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( ) -> List[Any]:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_get("""https://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_get("""ftp://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_get("""s3://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_head("""s3://huggingface.co""" )
"""simple docstring"""
from collections import defaultdict
from math import gcd
def _a ( _SCREAMING_SNAKE_CASE = 1_500_000 ) -> int:
snake_case_ = defaultdict(_SCREAMING_SNAKE_CASE )
snake_case_ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , _SCREAMING_SNAKE_CASE , 2 ):
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) > 1:
continue
snake_case_ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(_SCREAMING_SNAKE_CASE , limit + 1 , _SCREAMING_SNAKE_CASE ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""Flax pipeline tests: download behaviour and FlaxStableDiffusionPipeline integration."""

import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the
        # `sum` over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : str = ["""image_processor""", """tokenizer"""]
a_ : List[str] = """ViTImageProcessor"""
a_ : List[str] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : List[str] , a_ : str=None , a_ : Dict=None , **a_ : List[Any] ):
lowerCAmelCase_ : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("feature_extractor" )
lowerCAmelCase_ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
def __call__( self : Union[str, Any] , a_ : Any=None , a_ : Dict=None , a_ : List[str]=None , a_ : str=None , **a_ : Any ):
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCAmelCase_ : Optional[Any] = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if visual_prompt is not None:
lowerCAmelCase_ : Optional[Any] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if images is not None:
lowerCAmelCase_ : List[str] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if visual_prompt is not None and images is not None:
lowerCAmelCase_ : Union[str, Any] = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCAmelCase_ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCAmelCase_ : Dict = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def lowerCamelCase ( self : Optional[int] , *a_ : Optional[Any] , **a_ : List[str] ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def lowerCamelCase ( self : Optional[Any] , *a_ : Tuple , **a_ : Tuple ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def lowerCamelCase ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def lowerCamelCase ( self : Dict ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
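
# Hedged usage sketch for the processor above. The checkpoint name is the
# public CLIPSeg model on the Hub; the image file is illustrative.
#
#   from PIL import Image
#   from transformers import CLIPSegProcessor
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   image = Image.open("cats.png")
#   inputs = processor(text=["a cat", "a remote"], images=[image] * 2, return_tensors="pt")
#   # -> dict with input_ids, attention_mask and pixel_values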
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union

from packaging import version

from ..utils import is_torch_available, logging


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """Configuration for 8-bit / 4-bit quantization through `bitsandbytes`."""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments are correct."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
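
# Hedged usage sketch: loading a model in 4-bit with this config. The model
# name is only an example, and `from_pretrained(..., quantization_config=...)`
# needs a CUDA-enabled bitsandbytes install.
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   bnb_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype="bfloat16",
#       bnb_4bit_use_double_quant=True,
#   )
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)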
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
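# --- Illustrative usage sketch (added; mirrors the integration test above) ---
#     >>> model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#     >>> output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#     >>> tuple(output.shape)  # hidden size of conv-bert-base is 768
#     (1, 6, 768)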
| 329 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats in [0, scale)."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any]=7 , UpperCamelCase__ : List[str]=4_0_0 , UpperCamelCase__ : str=2_0_0_0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_6_0_0_0 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=True , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = min_seq_length
UpperCamelCase = max_seq_length
UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase = feature_size
UpperCamelCase = padding_value
UpperCamelCase = sampling_rate
UpperCamelCase = return_attention_mask
UpperCamelCase = do_normalize
def A ( self : Optional[int] ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def A ( self : Union[str, Any] , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Union[str, Any]=False ):
"""simple docstring"""
def _flatten(UpperCamelCase__ : Optional[Any] ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
UpperCamelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = WavaVecaFeatureExtractionTester(self )
def A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(UpperCamelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
UpperCamelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
# Test batched
UpperCamelCase = feat_extract(UpperCamelCase__ , return_tensors='np' ).input_values
UpperCamelCase = feat_extract(UpperCamelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCamelCase = np.asarray(UpperCamelCase__ )
UpperCamelCase = feat_extract(UpperCamelCase__ , return_tensors='np' ).input_values
UpperCamelCase = feat_extract(UpperCamelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = feat_extract(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors='np' )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
UpperCamelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = feat_extract(UpperCamelCase__ , max_length=UpperCamelCase__ , padding=UpperCamelCase__ )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=1_0_0_0 , padding='max_length' , return_tensors='np' )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=1_0_0_0 , padding='longest' , return_tensors='np' )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=2_0_0_0 , padding='longest' , return_tensors='np' )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def A ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = np.random.rand(1_0_0 ).astype(np.floataa )
UpperCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def A ( self : Any ):
"""simple docstring"""
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase = WavaVecaConfig.from_pretrained(UpperCamelCase__ )
UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' )
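# --- Illustrative note (added) ---
# `floats_list` above returns a plain nested Python list, not a numpy array;
# the tests wrap it with np.asarray where an ndarray is needed:
#     >>> rows = floats_list((2, 3), scale=2.0)
#     >>> len(rows), len(rows[0])
#     (2, 3)
#     >>> all(0.0 <= v < 2.0 for row in rows for v in row)
#     True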
| 28 |
"""simple docstring"""
from collections import defaultdict
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = first_str.lower().strip()
__SCREAMING_SNAKE_CASE = second_str.lower().strip()
# Remove whitespace
__SCREAMING_SNAKE_CASE = first_str.replace(""" """ , """""" )
__SCREAMING_SNAKE_CASE = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
return False
# Default values for count should be 0
__SCREAMING_SNAKE_CASE = defaultdict(UpperCamelCase_ )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(UpperCamelCase_ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__magic_name__ = input("Enter the first string ").strip()
__magic_name__ = input("Enter the second string ").strip()
__magic_name__ = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
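# --- Illustrative examples (added) ---
#     >>> check_anagrams("Silent", "Listen")
#     True
#     >>> check_anagrams("This is a string", "Is this a string")
#     True
#     >>> check_anagrams("There", "Their")
#     False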
| 100 | 0 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    """simple docstring"""
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f'Error dividing inputs iterable among processes. '
            f'Total number of objects {len(iterable)}, '
            f'length: {sum(len(i[1]) for i in split_kwds)}')
    logger.info(
        f'Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}')
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f'Finished {num_proc} processes')
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f'Unpacked {len(mapped)} objects')
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable)
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
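# --- Illustrative note (added) ---
# The contiguous-split scheduling above gives each of the first `mod` workers
# one extra item; e.g. 10 items over 3 processes are sliced as [4, 3, 3]:
#     >>> num_proc, n = 3, 10
#     >>> div, mod = n // num_proc, n % num_proc
#     >>> [div + (1 if index < mod else 0) for index in range(num_proc)]
#     [4, 3, 3]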
| 369 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Returns all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
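# --- Illustrative example (added) ---
#     >>> heaps([1, 2, 3])
#     [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]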
| 4 | 0 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
__a: List[Any] = logging.get_logger(__name__)
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase=None , **__lowerCAmelCase ) -> Any:
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , __lowerCAmelCase , )
super().__init__(args=__lowerCAmelCase , **__lowerCAmelCase )
| 198 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'blip_2_vision_model'
def __init__( self : List[Any] ,snake_case : List[Any]=1408 ,snake_case : Optional[Any]=6144 ,snake_case : Optional[int]=39 ,snake_case : Optional[int]=16 ,snake_case : Optional[Any]=224 ,snake_case : Tuple=14 ,snake_case : Optional[Any]="gelu" ,snake_case : Union[str, Any]=0.00_001 ,snake_case : Dict=0.0 ,snake_case : Union[str, Any]=1e-10 ,snake_case : int=True ,**snake_case : str ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =attention_dropout
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =qkv_bias
@classmethod
def _lowerCAmelCase ( cls : Dict ,snake_case : Union[str, os.PathLike] ,**snake_case : str ):
cls._set_token_in_kwargs(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case ,**snake_case )
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'blip_2_qformer'
def __init__( self : Any ,snake_case : Dict=30522 ,snake_case : int=768 ,snake_case : List[Any]=12 ,snake_case : List[str]=12 ,snake_case : Optional[Any]=3072 ,snake_case : str="gelu" ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : Optional[Any]=512 ,snake_case : List[Any]=0.02 ,snake_case : List[str]=1e-12 ,snake_case : Tuple=0 ,snake_case : Union[str, Any]="absolute" ,snake_case : List[Any]=2 ,snake_case : List[str]=1408 ,**snake_case : Optional[Any] ,):
super().__init__(pad_token_id=snake_case ,**snake_case )
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =position_embedding_type
SCREAMING_SNAKE_CASE =cross_attention_frequency
SCREAMING_SNAKE_CASE =encoder_hidden_size
@classmethod
def _lowerCAmelCase ( cls : List[Any] ,snake_case : Union[str, os.PathLike] ,**snake_case : Dict ):
cls._set_token_in_kwargs(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case ,**snake_case )
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'blip-2'
__UpperCAmelCase = True
def __init__( self : int ,snake_case : Dict=None ,snake_case : Tuple=None ,snake_case : str=None ,snake_case : Union[str, Any]=32 ,**snake_case : int ):
super().__init__(**snake_case )
if vision_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
SCREAMING_SNAKE_CASE ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
SCREAMING_SNAKE_CASE =BlipaVisionConfig(**snake_case )
SCREAMING_SNAKE_CASE =BlipaQFormerConfig(**snake_case )
SCREAMING_SNAKE_CASE =text_config['model_type'] if 'model_type' in text_config else 'opt'
SCREAMING_SNAKE_CASE =CONFIG_MAPPING[text_model_type](**snake_case )
SCREAMING_SNAKE_CASE =self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE =self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE =num_query_tokens
SCREAMING_SNAKE_CASE =self.vision_config.hidden_size
SCREAMING_SNAKE_CASE =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE =1.0
SCREAMING_SNAKE_CASE =0.02
@classmethod
def _lowerCAmelCase ( cls : Union[str, Any] ,snake_case : BlipaVisionConfig ,snake_case : BlipaQFormerConfig ,snake_case : PretrainedConfig ,**snake_case : Any ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**snake_case ,)
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE =self.vision_config.to_dict()
SCREAMING_SNAKE_CASE =self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE =self.text_config.to_dict()
SCREAMING_SNAKE_CASE =self.__class__.model_type
return output
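# --- Illustrative usage sketch (added; upstream class names assumed). In
# transformers these classes are Blip2VisionConfig, Blip2QFormerConfig and
# Blip2Config; a composite config built from defaults falls back to an OPT
# text config and 32 query tokens:
#     >>> cfg = Blip2Config()
#     >>> cfg.num_query_tokens, cfg.text_config.model_type
#     (32, 'opt')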
| 334 | 0 |
'''simple docstring'''
def catalan(number: int) -> int:
    """Returns the nth Catalan number (1-indexed): catalan(5) == 14."""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
    import doctest
    doctest.testmod()
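# --- Illustrative example (added) ---
#     >>> [catalan(n) for n in range(1, 6)]
#     [1, 1, 2, 5, 14]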
| 370 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_A : List[Any] =True
except ImportError:
_A : int =False
_A : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _lowercase ( _lowercase ):
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__: ArgumentParser ):
lowerCamelCase__ : List[str] = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=UpperCamelCase__ , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=UpperCamelCase__ , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self: Optional[int] , UpperCamelCase__: bool , UpperCamelCase__: str , UpperCamelCase__: str=None , *UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[Any] = testing
lowerCamelCase__ : Tuple = testing_file
lowerCamelCase__ : int = path
def lowerCamelCase_ ( self: int ):
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase__ : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(UpperCamelCase__ ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
lowerCamelCase__ : int = (
Path(UpperCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCamelCase__ : int = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase__ ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
lowerCamelCase__ : List[str] = json.load(UpperCamelCase__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCamelCase__ , extra_context=UpperCamelCase__ , )
lowerCamelCase__ : Optional[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
lowerCamelCase__ : int = json.load(UpperCamelCase__ )
lowerCamelCase__ : Tuple = configuration["""lowercase_modelname"""]
lowerCamelCase__ : int = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(F'''{directory}/configuration.json''' )
lowerCamelCase__ : Union[str, Any] = """PyTorch""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase__ : Union[str, Any] = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase__ : Tuple = """Flax""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase__ : List[str] = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=UpperCamelCase__ )
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(UpperCamelCase__: Optional[int] ):
with open(UpperCamelCase__ , """r""" ) as f:
lowerCamelCase__ : Union[str, Any] = f.readlines()
with open(UpperCamelCase__ , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: List[str] ):
# Create temp file
lowerCamelCase__ , lowerCamelCase__ : Any = mkstemp()
lowerCamelCase__ : Tuple = False
with fdopen(UpperCamelCase__ , """w""" ) as new_file:
with open(UpperCamelCase__ ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase__ )
if line_to_copy_below in line:
lowerCamelCase__ : int = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase__ )
if not line_found:
raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase__ , UpperCamelCase__ )
# Remove original file
remove(UpperCamelCase__ )
# Move new file
move(UpperCamelCase__ , UpperCamelCase__ )
def skip_units(UpperCamelCase__: Optional[int] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase__: List[str] ):
with open(UpperCamelCase__ ) as datafile:
lowerCamelCase__ : int = []
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : int = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCamelCase__ : List[str] = line.split("""\"""" )[1]
lowerCamelCase__ : List[str] = skip_units(UpperCamelCase__ )
elif "# Below: " in line and "##" not in line:
lowerCamelCase__ : List[Any] = line.split("""\"""" )[1]
lowerCamelCase__ : Union[str, Any] = skip_units(UpperCamelCase__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = []
elif "# Replace with" in line and "##" not in line:
lowerCamelCase__ : str = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase__ )
remove(UpperCamelCase__ )
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(UpperCamelCase__ )
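# --- Illustrative note (added) ---
# The command registered above is invoked from the shell as:
#     $ transformers-cli add-new-model
# and, per the warning emitted in `run`, is deprecated in favor of
# `transformers-cli add-new-model-like`.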
| 129 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
_snake_case : int = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(__a )
from datasets import load_dataset
_snake_case : Any = load_dataset("""nielsr/rvlcdip-demo""" )
_snake_case : List[str] = dataset["""train"""][0]["""image"""].convert("""RGB""" )
_snake_case : Any = image_processor(__a, return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
_snake_case : List[Any] = model(**__a )
_snake_case : List[str] = outputs.logits
_snake_case : Union[str, Any] = torch.Size((1, 16) )
self.assertEqual(logits.shape, __a )
_snake_case : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347], device=__a, dtype=torch.float, )
self.assertTrue(torch.allclose(logits[0, :3], __a, atol=1E-4 ) )
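# --- Illustrative note (added) ---
# RVL-CDIP has 16 document classes, which is why the test asserts a logits
# shape of (1, 16) for a single input image.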
| 64 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Checks Project Euler 43's substring-divisibility rules on a digit tuple."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """Sums all 0 to 9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
    print(f'''{solution() = }''')
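# --- Worked check (added) ---
# Project Euler's example, 1406357289, passes every rule; e.g. its digits
# d5 d6 d7 form 357 and 357 % 7 == 0:
#     >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
#     True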
| 233 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class a ( _lowerCamelCase ):
snake_case_ = (DDPMParallelScheduler,)
def A_ ( self : List[Any] , **lowercase_ : str ):
snake_case_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowercase_ )
return config
def A_ ( self : List[Any] ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def A_ ( self : Optional[Any] ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def A_ ( self : Optional[Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def A_ ( self : List[str] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase_ )
def A_ ( self : Optional[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def A_ ( self : Optional[Any] ):
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def A_ ( self : List[str] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def A_ ( self : Union[str, Any] ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowercase_ )
def A_ ( self : Dict ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def A_ ( self : Dict ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = len(lowercase_ )
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter
snake_case_ = self.dummy_sample_deter + 0.1
snake_case_ = self.dummy_sample_deter - 0.1
snake_case_ = samplea.shape[0]
snake_case_ = torch.stack([samplea, samplea, samplea] , dim=0 )
snake_case_ = torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ )
snake_case_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
snake_case_ = scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
snake_case_ = torch.sum(torch.abs(lowercase_ ) )
snake_case_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def A_ ( self : str ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = len(lowercase_ )
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter
snake_case_ = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
snake_case_ = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
snake_case_ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
snake_case_ = pred_prev_sample
snake_case_ = torch.sum(torch.abs(lowercase_ ) )
snake_case_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def A_ ( self : int ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = len(lowercase_ )
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter
snake_case_ = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
snake_case_ = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
snake_case_ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
snake_case_ = pred_prev_sample
snake_case_ = torch.sum(torch.abs(lowercase_ ) )
snake_case_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def A_ ( self : int ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowercase_ )
snake_case_ = scheduler.timesteps
for i, timestep in enumerate(lowercase_ ):
if i == len(lowercase_ ) - 1:
snake_case_ = -1
else:
snake_case_ = timesteps[i + 1]
snake_case_ = scheduler.previous_timestep(lowercase_ )
snake_case_ = prev_t.item()
self.assertEqual(lowercase_ , lowercase_ )
def A_ ( self : str ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = [100, 87, 50, 51, 0]
with self.assertRaises(lowercase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowercase_ )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = [100, 87, 50, 1, 0]
snake_case_ = len(lowercase_ )
with self.assertRaises(lowercase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_ )
def A_ ( self : Any ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowercase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=lowercase_ )
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Optional[Any] = {'vocab_file': 'spiece.model'}
a : Tuple = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
a : Dict = {'bert_for_seq_generation': 512}
class a ( _lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = []
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Any , lowercase_ : str , lowercase_ : Optional[Any]="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : List[Any]="<pad>" , lowercase_ : List[str]="<::::>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Optional[int] , ):
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , sep_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def A_ ( self : int ):
return self.sp_model.get_piece_size()
def A_ ( self : Union[str, Any] ):
snake_case_ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Any , lowercase_ : Optional[int] ):
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self : Any , lowercase_ : str ):
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def A_ ( self : Optional[int] , lowercase_ : Union[str, Any] ):
return self.sp_model.piece_to_id(lowercase_ )
def A_ ( self : Dict , lowercase_ : str ):
snake_case_ = self.sp_model.IdToPiece(lowercase_ )
return token
def A_ ( self : Optional[int] , lowercase_ : List[Any] ):
snake_case_ = []
snake_case_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
snake_case_ = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def A_ ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
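# --- Illustrative usage sketch (added; hypothetical paths). Upstream this
# class is transformers' BertGenerationTokenizer and expects a real
# SentencePiece model file:
#     >>> tok = BertGenerationTokenizer("spiece.model")
#     >>> tok.save_vocabulary("/tmp")  # copies spiece.model alongside
#     ('/tmp/spiece.model',)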
| 72 | 0 |
def is_arithmetic_series(series: list) -> bool:
    '''Checks whether `series` has a constant difference between consecutive terms.'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    '''Returns the arithmetic mean of `series`.'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
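# --- Illustrative examples (added) ---
#     >>> is_arithmetic_series([2, 4, 6])
#     True
#     >>> is_arithmetic_series([3, 6, 12, 24])
#     False
#     >>> arithmetic_mean([2, 4, 6])
#     4.0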
| 329 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = AutoConfig.from_pretrained(a__ )
_UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ )
_UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ )
_UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_UpperCAmelCase = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_UpperCAmelCase = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = tax_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = tax_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = tax_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = tax_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = tax_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = tax_encoder_global_rel_embedding

    # Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = tax_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = tax_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = tax_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = tax_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = tax_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = tax_decoder_norm

    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = tax_decoder_rel_embedding

    # Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = tax_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
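
# Hedged, self-contained sketch of the renaming idea used above: walk a nested
# param dict and copy a tensor under a new key path. `copy_param` and the keys
# below are illustrative only, not part of the conversion script.
import numpy as np


def copy_param(params: dict, path: list, value):
    node = params
    for key in path[:-1]:
        node = node.setdefault(key, {})
    node[path[-1]] = value


_demo_params = {}
copy_param(_demo_params, ["encoder", "block", "0", "layer", "0", "kernel"], np.zeros((4, 4)))
assert _demo_params["encoder"]["block"]["0"]["layer"]["0"]["kernel"].shape == (4, 4)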
if __name__ == "__main__":
lowerCAmelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCAmelCase__ :List[str] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 329 | 1 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
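
# The shared FlaxModelTesterMixin tests consume the two hooks above:
# `dummy_input` supplies a sample tensor plus PRNG key, and
# `prepare_init_args_and_inputs_for_common` supplies the constructor kwargs
# used to build the FlaxAutoencoderKL under test.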
| 366 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
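
# Minimal, framework-free illustration of the shape convention asserted above:
# a single image encodes to (1, C, H, W) and a batch of N images to (N, C, H, W).
# `to_pixel_values` is a stand-in for the real image processor.
import numpy as np


def to_pixel_values(images):
    batch = images if isinstance(images, list) else [images]
    return np.stack([np.asarray(img, dtype=np.float32) for img in batch])


_single = np.zeros((3, 18, 18))
assert to_pixel_values(_single).shape == (1, 3, 18, 18)
assert to_pixel_values([_single, _single]).shape == (2, 3, 18, 18)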
| 161 | 0 |
"""simple docstring"""
def merge_sort(collection: list):
    """Pure-Python merge sort: returns a new sorted list."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted result."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
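
# Example: merge_sort([5, 2, 4, 1]) splits at mid and merges the sorted halves,
# e.g. merge([2, 5], [1, 4]) -> [1, 2, 4, 5].
assert merge_sort([5, 2, 4, 1]) == [1, 2, 4, 5]
assert merge_sort([]) == []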
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 64 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
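
# Self-contained illustration of the dotted-path walk in set_recursively
# (stand-in namespace objects, not real model modules):
from types import SimpleNamespace

_root = SimpleNamespace(encoder=SimpleNamespace(conv=SimpleNamespace(weight=None)))
_node = _root
for _attr in "encoder.conv".split("."):
    _node = getattr(_node, _attr)
_node.weight = "new_kernel"
assert _root.encoder.conv.weight == "new_kernel"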
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
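
# Self-contained sketch of the `.*.` wildcard matching used above: a mapping
# key like "a.*.b" matches any state-dict name containing both halves.
# `matches` is illustrative only, not one of this script's helpers.
def matches(key: str, name: str) -> bool:
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name


assert matches("encoder.*.conv", "encoder.layers.3.conv.weight")
assert not matches("encoder.*.conv", "decoder.layers.3.conv.weight")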
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
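
# Minimal sanity check for the defaults restored above (run manually; the
# 58101/1024 values come straight from the signature, everything else is
# stock PretrainedConfig behavior):
if __name__ == "__main__":
    _cfg = MarianConfig()
    assert _cfg.decoder_vocab_size == _cfg.vocab_size == 58_101
    assert _cfg.hidden_size == _cfg.d_model == 1_024  # resolved via attribute_map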
class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
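
# Pure-Python illustration of the dynamic-axis naming the config above emits
# for cached attention: every layer i contributes .key/.value entries whose
# axis 0 is the batch and axis 2 grows with the decoded sequence.
if __name__ == "__main__":
    axes = {
        f"past_key_values.{i}.{t}": {0: "batch", 2: "past_sequence + sequence"}
        for i in range(2)
        for t in ("key", "value")
    }
    assert len(axes) == 4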
| 366 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_load_decoder_tokenizer_mismatch_in_vocab_size(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1_000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def test_decoder_batch(self, pool_context):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Tuple = self.get_decoder()
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A )
UpperCAmelCase__ : str = list(_A )
with get_context('''fork''' ).Pool() as p:
UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
    def test_decoder_with_params(self):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_feature_extractor()
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : str = self._get_dummy_logits()
UpperCAmelCase__ : Optional[int] = 15
UpperCAmelCase__ : Dict = -2_0.0
UpperCAmelCase__ : Optional[Any] = -4.0
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : List[Any] = decoded_processor_out.text
UpperCAmelCase__ : List[str] = list(_A )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) )
    def test_decoder_with_params_of_lm(self):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_decoder()
UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[int] = self._get_dummy_logits()
UpperCAmelCase__ : List[str] = 2.0
UpperCAmelCase__ : Union[str, Any] = 5.0
UpperCAmelCase__ : str = -2_0.0
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text
UpperCAmelCase__ : Tuple = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch(
_A , _A , )
UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )
UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , _A )
    def test_decoder_download_ignores_files(self):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Dict = os.listdir(_A )
UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
    def test_decoder_local_files(self):
'''simple docstring'''
UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A )
UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : List[str] = os.listdir(_A )
UpperCAmelCase__ : Any = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
    def test_processor_from_auto_processor(self):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' )
UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()
UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A )
UpperCAmelCase__ : int = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
    def test_model_input_names(self):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : str = self._get_dummy_logits()[0]
UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
    def test_offsets_integration_fast_batch(self):
'''simple docstring'''
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = self._get_dummy_logits()
UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration(self):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : List[Any] = iter(_A )
UpperCAmelCase__ : Optional[Any] = next(_A )
UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy()
UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A )
UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : Any = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A )
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text )
# output times
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) )
UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) )
# fmt: off
UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
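
# Self-contained sketch of the frame -> seconds conversion used in the slow
# test above. 320 and 16_000 are the usual wav2vec2-base values, assumed here
# purely for illustration.
inputs_to_logits_ratio = 320
sampling_rate = 16_000
time_offset = inputs_to_logits_ratio / sampling_rate  # 0.02 s per logit frame
assert abs(time_offset - 0.02) < 1e-9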
| 299 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`,
    or whether it is an explicitly allowed special case."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
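

# --- Illustrative sketch (not part of the original script) ---
# The multi-line regex above is needed because code formatters may split a
# `getattr(self.config, "xxx", ...)` call over several lines, which the plain
# substring checks would miss. The attribute name below is a made-up example.
def _demo_multiline_getattr_match() -> bool:
    snippet = 'getattr(\n    self.config,\n    "chunk_size",\n    0,\n)'
    pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"chunk_size"'
    return re.search(pattern, snippet) is not None  # True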
def check_config_attributes_being_used(config_class):
    """Return the list of attributes of `config_class` that are not used in the corresponding modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check unused attributes for all configuration classes and raise an error if any are found."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
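

# --- Illustrative usage sketch (not part of the original file) ---
# Converting a TensorFlow MobileNetV1 checkpoint would look roughly like this;
# the checkpoint path below is a made-up placeholder.
#
#     config = MobileNetVaConfig()
#     model = MobileNetVaModel(config)
#     model = load_tf_weights_in_mobilenet_va(model, config, "/path/to/mobilenet_v1_1.0_224.ckpt")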
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
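

# Worked example (added note, not part of the original file): with a 7x7 input,
# stride 2 and kernel 3, 7 % 2 == 1, so pad_along_height = max(3 - 1, 0) = 2,
# split as pad_top = 1 and pad_bottom = 1 -- exactly TensorFlow's "SAME" rule.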
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models."""

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_va"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True) -> None:
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
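

# --- Illustrative usage sketch (not part of the original file) ---
# Running the classification head on a dummy input; shapes only, no pretrained
# weights involved.
#
#     config = MobileNetVaConfig(num_labels=10)
#     model = MobileNetVaForImageClassification(config)
#     pixel_values = torch.randn(1, config.num_channels, 224, 224)
#     logits = model(pixel_values).logits  # shape: (1, 10)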
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Pipeline that runs the same prompt through Stable Diffusion v1.1-v1.4 so their outputs can be compared."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        r"""Enable sliced attention computation to save memory at some cost in speed."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        r"""Disable sliced attention computation."""
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
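

# --- Illustrative usage sketch (not part of the original file) ---
# How this comparison pipeline would typically be loaded as a community
# pipeline; the `custom_pipeline` identifier below is an assumption.
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe(prompt="an astronaut riding a horse")
#     images_v1_1_to_v1_4 = output.images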
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled" is included.
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
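

# --- Illustrative note (not part of the original file) ---
# The helper above returns True only when all three conditions hold; with
# made-up environment values:
#
#     os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'               # has "partitions"
#     os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'  # MPI enabled
#     # ...and the `smdistributed` package must additionally be importable.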
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find the root of `func` (an expression in x) starting from point `a` by the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of logarithmic function (the root of log(x) - 1 is e)
print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
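
# --- Illustrative sketch (not part of the original file) ---
# The same iteration without sympy/eval, for a function passed as a callable
# together with its derivative (names below are assumptions):
def newton_raphson_callable(f, f_prime, x0: float, precision: float = 1e-10) -> float:
    """Newton-Raphson for a callable `f` with a known derivative `f_prime`."""
    x = x0
    while abs(f(x)) >= precision:
        x -= f(x) / f_prime(x)
    return x


# Example: the square root of 2 as the positive root of x**2 - 2:
# newton_raphson_callable(lambda x: x * x - 2, lambda x: 2 * x, 1.0)  # ~1.41421356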
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case_ ( A_ : float = 0.1 ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 3
_lowerCamelCase : List[str] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ):
primes += is_prime(A_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
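
# --- Illustrative note (not part of the original file) ---
# For ring side length j, the inner loop above visits the three non-square
# corners of the spiral via range(j*j + j + 1, (j + 2)**2, j + 1).
# Example: for j = 3 the corners checked are 13, 17 and 21 (25 is the square).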
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal (x1 <= x2 and y1 <= y2 for every box)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on.
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
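

# --- Illustrative sketch (not part of the original tests) ---
# LayoutLM expects bounding boxes on a 0-1000 normalized scale, as in the batch
# above. A minimal helper (page dimensions below are made-up):
def normalize_bbox(bbox, width, height):
    """Scale an (x0, y0, x1, y1) pixel box to LayoutLM's 0-1000 coordinate space."""
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]


# normalize_bbox((100, 50, 200, 75), width=800, height=600) -> [125, 83, 250, 125]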
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 185 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
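# Conceptual sketch of LocalSGD (illustrative only, not executed by this
# script): each worker takes `local_sgd_steps` ordinary optimizer steps on its
# own shard of data, and model parameters are only synchronized across workers
# once every K steps, so communication happens every K batches rather than on
# every batch:
#
#   for step, batch in enumerate(dataloader):
#       loss = model(**batch).loss
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()
#       if (step + 1) % local_sgd_steps == 0:
#           sync_parameters_across_workers(model)  # hypothetical helper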
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 270 |
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1; every other residue modulo 6 is
    # divisible by 2 or by 3, so only these candidates need to be tested.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
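# Why 6k +/- 1 suffices (illustrative): any integer is 6k + r with r in 0..5;
# r in {0, 2, 4} gives an even number and r == 3 gives a multiple of 3, so a
# prime above 3 must leave remainder 1 or 5 modulo 6, e.g.
#   all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29))  # -> True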
class Test(unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 161 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Print the first- and second-order Shannon entropies of ``text`` in bits,
    followed by the difference between the two.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                char_str = two_char_strings[sequence]
                prob = int(char_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text into two count tables: one for single characters and one
    for two-character sequences.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
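# Worked example (illustrative): for text = "ab", analyze_text returns
# ({'a': 1, 'b': 1}, {' a': 1, 'ab': 1}). Both count tables are uniform over
# two outcomes, so both entropies are -2 * (0.5 * log2(0.5)) = 1.0 bit, and
# calculate_prob("ab") prints "1.0", "1.0" and "0.0".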
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 360 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """
    Sieve the Euler totient function and return sum(phi(n) for 2 <= n <= limit),
    which counts the reduced proper fractions with denominator <= limit.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime; adjust phi for all multiples of i
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
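# Illustrative check: phi(2), ..., phi(8) = 1, 2, 2, 4, 2, 6, 4, whose sum is
# 21, so solution(8) == 21, matching the 21 reduced proper fractions with
# denominator <= 8 given in the Project Euler 72 statement.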
if __name__ == "__main__":
print(solution())
| 188 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 97 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv2FeatureExtractor"]
__UpperCAmelCase = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_layoutlmv2 import (
        LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        LayoutLMv2ForQuestionAnswering,
        LayoutLMv2ForSequenceClassification,
        LayoutLMv2ForTokenClassification,
        LayoutLMv2Layer,
        LayoutLMv2Model,
        LayoutLMv2PreTrainedModel,
    )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299 | 0 |
"""simple docstring"""
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """
    Thin wrapper around a text model config that adds the fields needed for a
    multimodal (text + image) head.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
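# Usage sketch (illustrative; assumes a Hugging Face text config such as
# BertConfig is available):
#   config = MMBTConfig(BertConfig(), num_labels=2)
#   config.modal_hidden_size  # -> 2048, next to all BertConfig attributes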
| 58 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
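# Example (illustrative): ids_tensor((2, 5), vocab_size=100) returns a NumPy
# array of shape (2, 5) whose int32 entries are drawn uniformly from [0, 100).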
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
__lowerCAmelCase : Dict = True
__lowerCAmelCase : List[str] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = jit(model.generate)
__lowerCAmelCase : Optional[Any] = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = self._get_input_ids_and_config()
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Tuple = max_length
__lowerCAmelCase : Any = 2
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = jit(model.generate)
__lowerCAmelCase : Dict = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: str) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = self._get_input_ids_and_config()
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Any = max_length
__lowerCAmelCase : Dict = 2
__lowerCAmelCase : int = 2
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._get_input_ids_and_config()
__lowerCAmelCase : str = True
__lowerCAmelCase : Tuple = max_length
__lowerCAmelCase : Tuple = 0.8
__lowerCAmelCase : Any = 10
__lowerCAmelCase : Any = 0.3
__lowerCAmelCase : List[Any] = 1
__lowerCAmelCase : int = 8
__lowerCAmelCase : Optional[int] = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = jit(model.generate)
__lowerCAmelCase : str = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = self._get_input_ids_and_config()
__lowerCAmelCase : int = max_length
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 8
__lowerCAmelCase : str = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = jit(model.generate)
__lowerCAmelCase : str = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._get_input_ids_and_config()
__lowerCAmelCase : Union[str, Any] = max_length
__lowerCAmelCase : Dict = 2
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : int = 8
__lowerCAmelCase : str = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = jit(model.generate)
__lowerCAmelCase : Union[str, Any] = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : Tuple = attention_mask.at[(0, 0)].set(0)
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : int = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model.generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = jit(model.generate)
__lowerCAmelCase : Dict = jit_generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : int = attention_mask.at[(0, 0)].set(0)
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model.generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = jit(model.generate)
__lowerCAmelCase : Any = jit_generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : int = attention_mask.at[(0, 0)].set(0)
__lowerCAmelCase : Tuple = 2
__lowerCAmelCase : Dict = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model.generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = jit(model.generate)
__lowerCAmelCase : int = jit_generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 58 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 183 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(jline) for jline in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
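# Usage sketch (illustrative): the collator above is meant to be passed to a
# PyTorch DataLoader together with the JsonlDataset defined in this file:
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)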
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
| 183 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
# vision encoder
if "img_encoder.pos_embed" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""img_encoder.pos_embed""" ,"""vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
_lowerCAmelCase : List[str] = name.replace("""img_encoder.patch_embed.proj""" ,"""vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
_lowerCAmelCase : str = name.replace("""img_encoder.patch_embed.norm""" ,"""vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""img_encoder.layers""" ,"""vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
_lowerCAmelCase : Optional[int] = name.replace("""blocks""" ,"""layers""" )
if "attn" in name and "pre_assign" not in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""attn""" ,"""self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCAmelCase : List[str] = name.replace("""proj""" ,"""out_proj""" )
if "pre_assign_attn.attn.proj" in name:
_lowerCAmelCase : List[Any] = name.replace("""pre_assign_attn.attn.proj""" ,"""pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
_lowerCAmelCase : Optional[int] = name.replace("""norm1""" ,"""layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""norm2""" ,"""layer_norm2""" )
if "img_encoder.norm" in name:
_lowerCAmelCase : int = name.replace("""img_encoder.norm""" ,"""vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""text_encoder.token_embedding""" ,"""text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
_lowerCAmelCase : int = name.replace("""text_encoder.positional_embedding""" ,"""text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
_lowerCAmelCase : Optional[int] = name.replace("""text_encoder.transformer.resblocks.""" ,"""text_model.encoder.layers.""" )
if "ln_1" in name:
_lowerCAmelCase : Tuple = name.replace("""ln_1""" ,"""layer_norm1""" )
if "ln_2" in name:
_lowerCAmelCase : int = name.replace("""ln_2""" ,"""layer_norm2""" )
if "c_fc" in name:
_lowerCAmelCase : str = name.replace("""c_fc""" ,"""fc1""" )
if "c_proj" in name:
_lowerCAmelCase : List[Any] = name.replace("""c_proj""" ,"""fc2""" )
if "text_encoder" in name:
_lowerCAmelCase : Dict = name.replace("""text_encoder""" ,"""text_model""" )
if "ln_final" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""ln_final""" ,"""final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""img_projector.linear_hidden.""" ,"""visual_projection.""" )
if "img_projector.linear_out." in name:
_lowerCAmelCase : List[str] = name.replace("""img_projector.linear_out.""" ,"""visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
_lowerCAmelCase : Tuple = name.replace("""text_projector.linear_hidden""" ,"""text_projection""" )
if "text_projector.linear_out" in name:
_lowerCAmelCase : List[Any] = name.replace("""text_projector.linear_out""" ,"""text_projection.3""" )
return name
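# Example of the mapping implemented above (illustrative):
#   rename_key("img_encoder.layers.0.blocks.1.attn.proj.weight")
#   -> "vision_model.encoder.stages.0.layers.1.self_attn.out_proj.weight"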
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Optional[Any] ) -> List[Any]:
for key in orig_state_dict.copy().keys():
_lowerCAmelCase : Tuple = orig_state_dict.pop(_lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCAmelCase : Optional[Any] = key.split(""".""" )
_lowerCAmelCase : int = int(key_split[2] ), int(key_split[4] )
_lowerCAmelCase : Union[str, Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCAmelCase : str = val[:dim, :]
_lowerCAmelCase : Union[str, Any] = val[dim : dim * 2, :]
_lowerCAmelCase : Union[str, Any] = val[-dim:, :]
else:
_lowerCAmelCase : str = val[:dim]
_lowerCAmelCase : Any = val[dim : dim * 2]
_lowerCAmelCase : List[str] = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCAmelCase : Optional[int] = key.split(""".""" )
_lowerCAmelCase : str = int(key_split[3] )
_lowerCAmelCase : Dict = config.text_config.hidden_size
if "weight" in key:
_lowerCAmelCase : Any = val[:dim, :]
_lowerCAmelCase : int = val[
dim : dim * 2, :
]
_lowerCAmelCase : Optional[int] = val[-dim:, :]
else:
_lowerCAmelCase : Union[str, Any] = val[:dim]
_lowerCAmelCase : int = val[dim : dim * 2]
_lowerCAmelCase : Optional[int] = val[-dim:]
else:
_lowerCAmelCase : List[Any] = rename_key(_lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCAmelCase : Any = val.squeeze_()
else:
_lowerCAmelCase : Union[str, Any] = val
return orig_state_dict
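def _split_fused_qkv_demo():
    # Illustrative only (not part of the original conversion script): shows the row
    # layout assumed by the qkv split above, with q/k/v stacked along dimension 0 of
    # a fused projection of shape (3 * dim, dim).
    import torch

    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v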
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
_lowerCAmelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase : Dict = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[int]="groupvit-gcc-yfcc" ,_lowerCamelCase : List[Any]=False ) -> Dict:
_lowerCAmelCase : Tuple = GroupViTConfig()
_lowerCAmelCase : Dict = GroupViTModel(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = torch.load(_lowerCamelCase ,map_location="""cpu""" )["""model"""]
_lowerCAmelCase : List[str] = convert_state_dict(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : int = model.load_state_dict(_lowerCamelCase ,strict=_lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCamelCase ) == 0)
# verify result
_lowerCAmelCase : Tuple = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : Union[str, Any] = processor(text=["""a photo of a cat""", """a photo of a dog"""] ,images=_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors="""pt""" )
with torch.no_grad():
_lowerCAmelCase : str = model(**_lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCAmelCase : str = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCAmelCase : Union[str, Any] = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f"Model name {model_name} not supported." )
assert torch.allclose(outputs.logits_per_image ,_lowerCamelCase ,atol=1e-3 )
processor.save_pretrained(_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print("""Successfully saved processor and model to""" ,_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(_lowerCamelCase ,organization="""nielsr""" )
model.push_to_hub(_lowerCamelCase ,organization="""nielsr""" )
if __name__ == "__main__":
_a : str = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
_a : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
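    # Example invocation (script name and paths are illustrative placeholders):
    #   python convert_groupvit_checkpoint.py --checkpoint_path /path/to/checkpoint.pth \
    #       --pytorch_dump_folder_path ./groupvit-gcc-yfcc --model_name groupvit-gcc-yfcc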
| 362 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_a : Union[str, Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
def __init__( self , a__ , a__=16 , a__=13 , a__=7 , a__=14 , a__=10 , a__=19 , a__=5 , a__=4 , a__=True , a__=16 , a__=2 , a__=4 , a__=4 , a__="gelu" , a__=0.1 , a__=0.1 , a__=[1, 2, 3, 4, 5] , a__=25 , a__=5 , ):
_lowerCAmelCase : Union[str, Any] = d_model
_lowerCAmelCase : int = parent
_lowerCAmelCase : List[Any] = batch_size
_lowerCAmelCase : Optional[int] = prediction_length
_lowerCAmelCase : int = context_length
_lowerCAmelCase : Optional[Any] = cardinality
_lowerCAmelCase : Tuple = num_time_features
_lowerCAmelCase : str = lags_sequence
_lowerCAmelCase : int = embedding_dimension
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = context_length
_lowerCAmelCase : Optional[int] = prediction_length + label_length
_lowerCAmelCase : Dict = label_length
_lowerCAmelCase : Dict = moving_average
_lowerCAmelCase : Union[str, Any] = autocorrelation_factor
def __A ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = config.context_length + max(config.lags_sequence )
_lowerCAmelCase : int = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_lowerCAmelCase : int = floats_tensor([self.batch_size, _past_length] )
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_lowerCAmelCase : Any = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, config.prediction_length] )
_lowerCAmelCase : Dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def __A ( self ):
_lowerCAmelCase : Any = self.get_config()
_lowerCAmelCase : str = self.prepare_autoformer_inputs_dict(a__ )
return config, inputs_dict
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = AutoformerModel(config=a__ ).to(a__ ).eval()
_lowerCAmelCase : int = model(**a__ )
_lowerCAmelCase : List[str] = outputs.encoder_last_hidden_state
_lowerCAmelCase : Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Optional[int] = model.get_encoder()
encoder.save_pretrained(a__ )
_lowerCAmelCase : Optional[int] = AutoformerEncoder.from_pretrained(a__ ).to(a__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = model.create_network_inputs(**a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_lowerCAmelCase : Any = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_lowerCAmelCase : Dict = encoder(inputs_embeds=a__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_lowerCAmelCase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_lowerCAmelCase : Optional[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_lowerCAmelCase : Optional[int] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_lowerCAmelCase : Optional[int] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Optional[int] = model.get_decoder()
decoder.save_pretrained(a__ )
_lowerCAmelCase : Any = AutoformerDecoder.from_pretrained(a__ ).to(a__ )
_lowerCAmelCase : List[Any] = decoder(
trend=a__ , inputs_embeds=a__ , encoder_hidden_states=a__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
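        # Note on the construction above: Autoformer decomposes the context window into
        # seasonal and trend components; the decoder's seasonal stream is padded with
        # zeros over the prediction horizon and the trend stream with the context mean,
        # each concatenated with the time features over the label window.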
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCamelCase : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCamelCase : int = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : str = False
def __A ( self ):
_lowerCAmelCase : Tuple = AutoformerModelTester(self )
_lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_class.from_pretrained(a__ , output_loading_info=a__ )
self.assertEqual(info["""missing_keys"""] , [] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Dict = inspect.signature(getattr(a__ , """forward""" ) )
# The main input is the name of the argument after `self`
_lowerCAmelCase : Dict = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(a__ )
_lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : List[str] = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(a__ )] , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Optional[Any] = getattr(self.model_tester , """seq_length""" , a__ )
_lowerCAmelCase : List[str] = getattr(self.model_tester , """decoder_seq_length""" , a__ )
_lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , a__ )
_lowerCAmelCase : int = getattr(self.model_tester , """d_model""" , a__ )
_lowerCAmelCase : Optional[Any] = getattr(self.model_tester , """num_attention_heads""" , a__ )
_lowerCAmelCase : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : List[Any] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : int = True
_lowerCAmelCase : Optional[Any] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : int = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Optional[int] = outputs.encoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_lowerCAmelCase : Dict = len(a__ )
_lowerCAmelCase : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(a__ , a__ )
# decoder attentions
_lowerCAmelCase : int = outputs.decoder_attentions
self.assertIsInstance(a__ , (list, tuple) )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_lowerCAmelCase : Optional[Any] = outputs.cross_attentions
self.assertIsInstance(a__ , (list, tuple) )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_lowerCAmelCase : Dict = True
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Optional[int] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + 2 , len(a__ ) )
_lowerCAmelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __A ( self ):
super().test_retain_grad_hidden_states_attentions()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any="train-batch.pt" ) -> Optional[int]:
_lowerCAmelCase : List[Any] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" ,filename=_lowerCamelCase ,repo_type="""dataset""" )
_lowerCAmelCase : Dict = torch.load(_lowerCamelCase ,map_location=_lowerCamelCase )
return batch
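# Illustrative note: the downloaded batch is a dict of tensors keyed like the model
# inputs used in the tests below (past_values, past_time_features, past_observed_mask,
# static_categorical_features, future_values, future_time_features), with batch size 64.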
@require_torch
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
_lowerCAmelCase : List[Any] = prepare_batch()
with torch.no_grad():
_lowerCAmelCase : str = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
_lowerCAmelCase : Optional[int] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : Optional[int] = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=a__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , a__ , atol=a__ ) )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
_lowerCAmelCase : Union[str, Any] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_lowerCAmelCase : str = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
_lowerCAmelCase : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : Any = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=a__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , a__ , atol=a__ ) )
def __A ( self ):
_lowerCAmelCase : List[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
_lowerCAmelCase : str = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_lowerCAmelCase : str = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
_lowerCAmelCase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , a__ )
_lowerCAmelCase : Optional[Any] = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=a__ )
_lowerCAmelCase : Optional[int] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , a__ , rtol=1e-1 ) )
| 126 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str ) -> int:
__lowerCamelCase : Dict = sorted(zip(UpperCAmelCase_ , UpperCAmelCase_ ) , key=lambda UpperCAmelCase_ : x[0] / x[1] , reverse=UpperCAmelCase_ )
__lowerCamelCase , __lowerCamelCase : List[Any] = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase : Dict = list(accumulate(UpperCAmelCase_ ) )
__lowerCamelCase : Union[str, Any] = bisect(UpperCAmelCase_ , UpperCAmelCase_ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
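    # Illustrative expected behavior (arguments: values, weights, capacity, n):
    #   fractional knapsack of ([60, 100, 120], [10, 20, 30], 50, 3) -> 240.0
    #   (items 1 and 2 taken whole, plus 20/30 of item 3: 60 + 100 + 80)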
| 185 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] ) -> str:
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Optional[int] = len(UpperCAmelCase_ )
for i in range(n - 1 ):
for j in range(i + 1 , UpperCAmelCase_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> Optional[int]:
if len(UpperCAmelCase_ ) <= 1:
return arr, 0
__lowerCamelCase : str = len(UpperCAmelCase_ ) // 2
__lowerCamelCase : List[Any] = arr[0:mid]
__lowerCamelCase : List[str] = arr[mid:]
__lowerCamelCase , __lowerCamelCase : int = count_inversions_recursive(UpperCAmelCase_ )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = count_inversions_recursive(UpperCAmelCase_ )
__lowerCamelCase , __lowerCamelCase : Any = _count_cross_inversions(UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : Optional[int] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] ) -> Optional[Any]:
__lowerCamelCase : List[str] = []
__lowerCamelCase : Optional[int] = 0
while i < len(UpperCAmelCase_ ) and j < len(UpperCAmelCase_ ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(UpperCAmelCase_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(UpperCAmelCase_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
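# Worked example of the merge-and-count step above (intended behavior):
#   P = [1, 3, 5], Q = [2, 4] merge to [1, 2, 3, 4, 5] with 3 cross
#   inversions: (3, 2), (5, 2) and (5, 4).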
def UpperCAmelCase__ ( ) -> List[str]:
__lowerCamelCase : Any = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__lowerCamelCase : Optional[Any] = count_inversions_bf(UpperCAmelCase_ )
__lowerCamelCase , __lowerCamelCase : Dict = count_inversions_recursive(UpperCAmelCase_ )
assert num_inversions_bf == num_inversions_recursive == 8
print('number of inversions = ' , UpperCAmelCase_ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__lowerCamelCase : Optional[Any] = count_inversions_bf(UpperCAmelCase_ )
__lowerCamelCase , __lowerCamelCase : int = count_inversions_recursive(UpperCAmelCase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , UpperCAmelCase_ )
# an empty list should also have zero inversions
__lowerCamelCase : Dict = []
__lowerCamelCase : Optional[Any] = count_inversions_bf(UpperCAmelCase_ )
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = count_inversions_recursive(UpperCAmelCase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 185 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def a__ ( ) -> Generator[int, None, None]:
UpperCAmelCase__ : dict[int, int] = {}
UpperCAmelCase__ : Tuple = 2
while True:
UpperCAmelCase__ : str = factor_map.pop(lowerCAmelCase , lowerCAmelCase )
if factor:
UpperCAmelCase__ : str = factor + prime
while x in factor_map:
x += factor
UpperCAmelCase__ : Dict = factor
else:
UpperCAmelCase__ : str = prime
yield prime
prime += 1
def a__ ( lowerCAmelCase = 1E10 ) -> int:
UpperCAmelCase__ : Optional[int] = sieve()
UpperCAmelCase__ : Union[str, Any] = 1
while True:
UpperCAmelCase__ : Tuple = next(lowerCAmelCase )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(lowerCAmelCase )
n += 2
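# Why the 2 * prime * n check works: by the binomial theorem, for odd n,
#   (p - 1)**n + (p + 1)**n  ==  2 * n * p  (mod p**2)
# while for even n the remainder is just 2, hence the extra next() above.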
if __name__ == "__main__":
print(solution())
| 166 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase=0.01 , _lowerCamelCase=1000 ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = p_stop
UpperCAmelCase__ : Any = max_length
def __iter__(self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : Tuple = False
while not stop and count < self.max_length:
yield count
count += 1
UpperCAmelCase__ : Dict = random.random() < self.p_stop
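# Sharding behavior exercised below (summarized from the expected values in the tests):
# BatchSamplerShard with 2 processes hands each process every other whole batch when
# split_batches=False, or half of every batch when split_batches=True; with
# even_batches=True it wraps around to the start of the data to pad short shards.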
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=True ):
"""simple docstring"""
UpperCAmelCase__ : int = [
BatchSamplerShard(_lowerCamelCase , 2 , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
for i in range(2 )
]
UpperCAmelCase__ : List[Any] = [list(_lowerCamelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_lowerCamelCase ) for shard in batch_sampler_shards] , [len(_lowerCamelCase ) for e in expected] )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase__ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
UpperCAmelCase__ : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
UpperCAmelCase__ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCAmelCase__ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase__ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase__ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase__ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCAmelCase__ : List[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase__ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
UpperCAmelCase__ : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
UpperCAmelCase__ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCAmelCase__ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase__ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase__ : List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCAmelCase__ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
UpperCAmelCase__ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : Tuple = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
UpperCAmelCase__ : List[str] = [BatchSamplerShard(_lowerCamelCase , 2 , _lowerCamelCase , even_batches=_lowerCamelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=False ):
"""simple docstring"""
random.seed(_lowerCamelCase )
UpperCAmelCase__ : Tuple = list(_lowerCamelCase )
UpperCAmelCase__ : Any = [
IterableDatasetShard(
_lowerCamelCase , batch_size=_lowerCamelCase , drop_last=_lowerCamelCase , num_processes=_lowerCamelCase , process_index=_lowerCamelCase , split_batches=_lowerCamelCase , )
for i in range(_lowerCamelCase )
]
UpperCAmelCase__ : List[Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_lowerCamelCase )
iterable_dataset_lists.append(list(_lowerCamelCase ) )
UpperCAmelCase__ : Union[str, Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
UpperCAmelCase__ : str = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
self.assertTrue(len(_lowerCamelCase ) % shard_batch_size == 0 )
UpperCAmelCase__ : Union[str, Any] = []
for idx in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_lowerCamelCase ) < len(_lowerCamelCase ):
reference += reference
self.assertListEqual(_lowerCamelCase , reference[: len(_lowerCamelCase )] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = 42
UpperCAmelCase__ : List[Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
# Edge case with a very small dataset
UpperCAmelCase__ : str = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = BatchSampler(range(16 ) , batch_size=4 , drop_last=_lowerCamelCase )
UpperCAmelCase__ : int = SkipBatchSampler(_lowerCamelCase , 2 )
self.assertListEqual(list(_lowerCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = DataLoader(list(range(16 ) ) , batch_size=4 )
UpperCAmelCase__ : int = skip_first_batches(_lowerCamelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(_lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _a (self ):
"""simple docstring"""
Accelerator()
UpperCAmelCase__ : Dict = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(_lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 166 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( lowerCamelCase__ ):
__UpperCAmelCase : List[Any] = ['image_processor', 'tokenizer']
__UpperCAmelCase : Dict = 'ChineseCLIPImageProcessor'
__UpperCAmelCase : str = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
snake_case : Any = kwargs.pop("feature_extractor" )
snake_case : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
snake_case : Optional[int] = self.image_processor
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
snake_case : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = self.tokenizer.model_input_names
snake_case : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
return self.image_processor_class
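# Hedged usage sketch (the checkpoint id is an assumption, not taken from this file):
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")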
| 203 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self, **lowercase_ ) -> Dict:
"""simple docstring"""
super().__init__(**lowercase_ )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
# No specific FOR_XXX available yet
def __call__( self, lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
return super().__call__(lowercase_, **lowercase_ )
def _UpperCAmelCase ( self, **lowercase_ ) -> int:
"""simple docstring"""
a__ ={}
if "candidate_labels" in kwargs:
a__ =kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
a__ =kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def _UpperCAmelCase ( self, lowercase_, lowercase_=None, lowercase_="This is a sound of {}." ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(lowercase_, lowercase_ ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
a__ =requests.get(lowercase_ ).content
else:
with open(lowercase_, '''rb''' ) as f:
a__ =f.read()
if isinstance(lowercase_, lowercase_ ):
a__ =ffmpeg_read(lowercase_, self.feature_extractor.sampling_rate )
if not isinstance(lowercase_, np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
a__ =self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' )
a__ =candidate_labels
a__ =[hypothesis_template.format(lowercase_ ) for x in candidate_labels]
a__ =self.tokenizer(lowercase_, return_tensors=self.framework, padding=lowercase_ )
a__ =[text_inputs]
return inputs
def _UpperCAmelCase ( self, lowercase_ ) -> str:
"""simple docstring"""
a__ =model_inputs.pop('''candidate_labels''' )
a__ =model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0], lowercase_ ):
a__ =text_inputs[0]
else:
# Batching case.
a__ =text_inputs[0][0]
a__ =self.model(**lowercase_, **lowercase_ )
a__ ={
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def _UpperCAmelCase ( self, lowercase_ ) -> Any:
"""simple docstring"""
a__ =model_outputs.pop('''candidate_labels''' )
a__ =model_outputs['''logits'''][0]
if self.framework == "pt":
a__ =logits.softmax(dim=0 )
a__ =probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
a__ =[
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowercase_, lowercase_ ), key=lambda lowercase_ : -x[0] )
]
return result
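# Hedged usage sketch (task alias and checkpoint are assumptions):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])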
| 188 | 0 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] = True , _snake_case : int = math.inf , _snake_case : Tuple = -math.inf , _snake_case : Union[str, Any] = math.inf , _snake_case : List[Any] = -math.inf , _snake_case : Dict = False , _snake_case : Any = 100 , _snake_case : Dict = 0.01 , _snake_case : List[Any] = 1 , ):
lowerCAmelCase : Dict = False
lowerCAmelCase : Tuple = search_prob
lowerCAmelCase : Optional[int] = start_temperate
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Dict = 0
lowerCAmelCase : Union[str, Any] = None
while not search_end:
lowerCAmelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
lowerCAmelCase : Optional[Any] = current_state
scores.append(A_ )
iterations += 1
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCAmelCase : Optional[int] = random.randint(0 , len(A_ ) - 1 ) # picking a random neighbor
lowerCAmelCase : Dict = neighbors.pop(A_ )
lowerCAmelCase : Any = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCAmelCase : Optional[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCAmelCase : int = picked_neighbor
else:
lowerCAmelCase : Tuple = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCAmelCase : Optional[int] = picked_neighbor
lowerCAmelCase : Tuple = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCAmelCase : Optional[Any] = True
else:
lowerCAmelCase : Any = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(A_ ) , A_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
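# Worked example of the acceptance rule above: a move that worsens the score by 2
# at temperature 50 is still accepted with probability e**(-2 / 50) ~ 0.96, while
# at temperature 0.5 it survives only with probability e**(-4) ~ 0.018.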
if __name__ == "__main__":
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Optional[Any] ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
snake_case__ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
snake_case__ : Optional[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
snake_case__ : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
snake_case__ : int = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[Any] ):
return (3 * x**2) - (6 * y)
snake_case__ : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
snake_case__ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
f"""{local_min.score()}"""
)
snake_case__ : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
snake_case__ : Union[str, Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
f"""{local_min.score()}"""
)
| 358 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : str = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : Union[str, Any] = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its serialized state disagrees with the arguments passed in.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # A BERT sequence has the format [CLS] A [SEP] (and B [SEP] for pairs).
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Token type ids are 0 for the first segment and 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
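# Usage sketch (illustrative only; assumes network access to a hosted checkpoint):
# tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
# encoded = tokenizer("Hello world!", return_tensors="pt")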
| 314 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    """Translates text between any two of the NLLB-200 languages listed above."""

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
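# Usage sketch (illustrative; the call pattern is an assumption about the PipelineTool API,
# and the language names must match keys of LANGUAGE_CODES above):
# translator = TranslationTool()
# translator.setup()
# print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))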
| 58 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    # Round the latent grid up so height/width are divisible by scale_factor**2.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
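# For example, downscale_height_and_width(768, 768, 8) returns (96, 96): a 768x768
# image request maps to a 96x96 latent grid before the movq decoder upsamples it again.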
class KandinskyV22Pipeline(DiffusionPipeline):
    """Text-to-image decoder pipeline for Kandinsky 2.2, driven by image embeddings from the prior."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 58 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
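# Usage sketch (illustrative; `image` is assumed to be a PIL image you already loaded):
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)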
| 360 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) : Union[str, Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
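# To run these tests locally (illustrative; the path assumes the usual transformers repo layout):
# python -m pytest tests/models/xlm/test_modeling_xlm.py -k "xlm" -v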
| 345 | 0 |
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n naturals."""
    # Closed forms: sum(k) = n(n+1)/2 and sum(k^2) = n(n+1)(2n+1)/6.
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
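# Cross-check of the closed forms against brute force (illustrative):
# assert solution(100) == sum(range(1, 101)) ** 2 - sum(k * k for k in range(1, 101))  # == 25164150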
| 325 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 126 | 0 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
def UpperCAmelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , )-> Mapping[str, Any]:
'''simple docstring'''
UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , A_ , A_ , A_ , A_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCamelCase , UpperCamelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCamelCase = seqlen + 2
UpperCamelCase , UpperCamelCase = self.num_layers
UpperCamelCase , UpperCamelCase = self.num_attention_heads
UpperCamelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase = common_inputs['attention_mask'].dtype
UpperCamelCase = torch.cat(
[common_inputs['attention_mask'], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
UpperCamelCase = [
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ )
]
return common_inputs
def UpperCAmelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , )-> Mapping[str, Any]:
'''simple docstring'''
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase = tokenizer.num_special_tokens_to_add(A_ )
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCamelCase = dict(tokenizer(A_ , return_tensors=A_ ) )
return common_inputs
def UpperCAmelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , )-> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
elif self.task == "causal-lm":
UpperCamelCase = self._generate_dummy_inputs_for_causal_lm(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
else:
UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
return common_inputs
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Union[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ )
else:
UpperCamelCase = super(A_ , self )._flatten_past_key_values_(
A_ , A_ , A_ , A_ )
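
# Illustrative sketch (not from the upstream module): the layer-count reconciliation
# used above when building dummy `past_key_values` for a seq2seq model. Layers present
# in both stacks receive 4 zero tensors (decoder self-attention key/value plus
# cross-attention key/value); the surplus layers of the deeper stack receive only 2.
# All sizes and the helper name are assumptions for illustration.
def _sketch_seq2seq_past_key_values(num_encoder_layers: int = 6, num_decoder_layers: int = 4):
    import torch

    batch, heads, head_dim, encoder_seq_length, decoder_seq_length = 2, 4, 8, 10, 5
    encoder_shape = (batch, heads, encoder_seq_length, head_dim)
    decoder_shape = (batch, heads, decoder_seq_length + 3, head_dim)
    min_num_layers = min(num_encoder_layers, num_decoder_layers)
    remaining = max(num_encoder_layers, num_decoder_layers) - min_num_layers
    remaining_side = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
    past = [
        (
            torch.zeros(decoder_shape),
            torch.zeros(decoder_shape),
            torch.zeros(encoder_shape),
            torch.zeros(encoder_shape),
        )
        for _ in range(min_num_layers)
    ]
    shape = encoder_shape if remaining_side == "encoder" else decoder_shape
    past += [(torch.zeros(shape), torch.zeros(shape)) for _ in range(remaining)]
    return past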
| 369 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """
    tf_to_pt_map = {}

    # `MobileNetVaForImageClassification` is the classification head class defined at
    # the bottom of this file (its name is mangled there, so the name used here is an
    # assumption following this module's `Va` convention).
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Pad `features` the way TensorFlow's "SAME" padding would for `conv_layer`."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
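
# Worked example (illustrative numbers, not from the upstream module): for a 7x7 input
# with a 3x3 kernel and stride 2, 7 % 2 == 1, so pad_along_height/width =
# max(3 - 1, 0) = 2, split into 1 pixel before and 1 after, yielding a 9x9 result:
#     conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
#     padded = apply_tf_padding(torch.zeros(1, 3, 7, 7), conv)  # shape (1, 3, 9, 9)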
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , A_ , A_ , A_ , A_ , A_ = 1 , A_ = 1 , A_ = False , A_ = True , A_ = True , )-> None:
'''simple docstring'''
super().__init__()
UpperCamelCase = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
UpperCamelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
UpperCamelCase = nn.Convad(
in_channels=A_ , out_channels=A_ , kernel_size=A_ , stride=A_ , padding=A_ , groups=A_ , bias=A_ , padding_mode='zeros' , )
if use_normalization:
UpperCamelCase = nn.BatchNormad(
num_features=A_ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=A_ , track_running_stats=A_ , )
else:
UpperCamelCase = None
if use_activation:
if isinstance(A_ , A_ ):
UpperCamelCase = ACTaFN[use_activation]
elif isinstance(config.hidden_act , A_ ):
UpperCamelCase = ACTaFN[config.hidden_act]
else:
UpperCamelCase = config.hidden_act
else:
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> torch.Tensor:
'''simple docstring'''
if self.config.tf_padding:
UpperCamelCase = apply_tf_padding(A_ , self.convolution )
UpperCamelCase = self.convolution(A_ )
if self.normalization is not None:
UpperCamelCase = self.normalization(A_ )
if self.activation is not None:
UpperCamelCase = self.activation(A_ )
return features
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = MobileNetVaConfig
lowerCAmelCase_ = load_tf_weights_in_mobilenet_va
lowerCAmelCase_ = """mobilenet_v1"""
lowerCAmelCase_ = """pixel_values"""
lowerCAmelCase_ = False
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
if isinstance(A_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(A_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , A_ = True )-> Union[str, Any]:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config
UpperCamelCase = 32
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
UpperCamelCase = MobileNetVaConvLayer(
A_ , in_channels=config.num_channels , out_channels=A_ , kernel_size=3 , stride=2 , )
UpperCamelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
UpperCamelCase = nn.ModuleList()
for i in range(13 ):
UpperCamelCase = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
A_ , in_channels=A_ , out_channels=A_ , kernel_size=3 , stride=strides[i] , groups=A_ , ) )
self.layer.append(
MobileNetVaConvLayer(
A_ , in_channels=A_ , out_channels=A_ , kernel_size=1 , ) )
UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , )-> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
UpperCamelCase = self.conv_stem(A_ )
UpperCamelCase = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
UpperCamelCase = layer_module(A_ )
if output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = hidden_states
if self.pooler is not None:
UpperCamelCase = torch.flatten(self.pooler(A_ ) , start_dim=1 )
else:
UpperCamelCase = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=A_ , )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> None:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = MobileNetVaModel(A_ )
UpperCamelCase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
UpperCamelCase = nn.Dropout(config.classifier_dropout_prob , inplace=A_ )
UpperCamelCase = nn.Linear(A_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , )-> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.mobilenet_va(A_ , output_hidden_states=A_ , return_dict=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier(self.dropout(A_ ) )
UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase = 'single_label_classification'
else:
UpperCamelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
UpperCamelCase = MSELoss()
if self.num_labels == 1:
UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase = loss_fct(A_ , A_ )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase = BCEWithLogitsLoss()
UpperCamelCase = loss_fct(A_ , A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=A_ , logits=A_ , hidden_states=outputs.hidden_states , )
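
# Minimal usage sketch (not from the upstream module), assuming the public
# `google/mobilenet_v1_1.0_224` checkpoint; kept as comments so nothing runs on import:
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#     from PIL import Image
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])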
| 251 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase = False
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
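
        # Note (added for clarity): the 3x3 slice at [253:256, 253:256] of the last
        # channel is a cheap fingerprint of the full 512x512 output. Exact pixel values
        # drift slightly across GPU/driver stacks, so the test tolerates up to 1e-2
        # absolute deviation rather than demanding exact equality.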
| 166 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCamelCase = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowerCamelCase = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowerCamelCase = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float'),
'references': datasets.Value('float'),
}) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(predictions, references)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 166 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
_lowerCAmelCase : Any = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
_lowerCAmelCase : int = TaTokenizerFast
_lowerCAmelCase : List[Any] = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 354 |
"""simple docstring"""
_lowerCAmelCase : dict[tuple[int, int, int], int] = {}
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
_lowerCamelCase : Optional[int] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
_lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
_lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
_lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 )
_lowerCamelCase : List[Any] = state_late + state_absent + state_ontime
_lowerCamelCase : int = prizestrings
return prizestrings
def lowerCamelCase_( _lowerCamelCase = 30 ) -> int:
'''simple docstring'''
return _calculate(_lowerCamelCase , absent=0 , late=0 )
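
# Brute-force cross-check (not part of the original solution): enumerate every
# on-time/late/absent string and keep those with at most one absence in total and no
# run of three consecutive late days. For days=4 both approaches return 43.
def _brute_force(days: int) -> int:
    from itertools import product

    return sum(
        1
        for s in ("".join(p) for p in product("OLA", repeat=days))
        if s.count("A") < 2 and "LLL" not in s
    )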
if __name__ == "__main__":
print(solution())
| 340 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from CLI arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : dict
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : List[str]
_A : Optional[List[int]]
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : str
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Any
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase_ ( snake_case: ArgumentParser ) -> Tuple:
snake_case_ :Any = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=snake_case , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=snake_case , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=snake_case , default=8_888 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=snake_case , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=snake_case , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=snake_case , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=snake_case , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=snake_case , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=snake_case )
def __init__( self: int , snake_case: Pipeline , snake_case: str , snake_case: int , snake_case: int ) -> List[Any]:
snake_case_ :Optional[Any] = pipeline
snake_case_ :Optional[Any] = host
snake_case_ :Optional[Any] = port
snake_case_ :Tuple = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(f"""Serving model over {host}:{port}""" )
snake_case_ :List[str] = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=snake_case , response_class=snake_case , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=snake_case , response_class=snake_case , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=snake_case , response_class=snake_case , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=snake_case , response_class=snake_case , methods=["""POST"""] , ),
] , timeout=600 , )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowerCAmelCase_ ( self: Any ) -> Any:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowerCAmelCase_ ( self: Tuple , snake_case: str = Body(snake_case , embed=snake_case ) , snake_case: bool = Body(snake_case , embed=snake_case ) ) -> Union[str, Any]:
try:
snake_case_ :Dict = self._pipeline.tokenizer.tokenize(snake_case )
if return_ids:
snake_case_ :int = self._pipeline.tokenizer.convert_tokens_to_ids(snake_case )
return ServeTokenizeResult(tokens=snake_case , tokens_ids=snake_case )
else:
return ServeTokenizeResult(tokens=snake_case )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(snake_case )} )
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: List[int] = Body(snake_case , embed=snake_case ) , snake_case: bool = Body(snake_case , embed=snake_case ) , snake_case: bool = Body(snake_case , embed=snake_case ) , ) -> Union[str, Any]:
try:
snake_case_ :Dict = self._pipeline.tokenizer.decode(snake_case , snake_case , snake_case )
return ServeDeTokenizeResult(model="""""" , text=snake_case )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(snake_case )} )
async def lowerCAmelCase_ ( self: Dict , snake_case: Optional[int]=Body(snake_case , embed=snake_case ) ) -> Union[str, Any]:
# Check we don't have empty string
if len(snake_case ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
snake_case_ :List[str] = self._pipeline(snake_case )
return ServeForwardResult(output=snake_case )
except Exception as e:
raise HTTPException(500 , {"""error""": str(snake_case )} )
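
    # Usage sketch (added for illustration): once the server is running, e.g.
    # `transformers-cli serve --task sentiment-analysis`, the routes registered above
    # can be exercised with plain HTTP. The payload field names are assumptions
    # inferred from the `Body(..., embed=...)` parameters:
    #     curl http://localhost:8888/                         # model configuration
    #     curl -X POST http://localhost:8888/forward \
    #          -H "Content-Type: application/json" -d '{"inputs": "I love this."}'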
| 66 |
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit
    number ``N`` (Project Euler problem 8)."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
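
# Alternative for comparison (not part of the original solution): the reduce-based
# one-liner above re-parses every 13-digit window; an explicit loop is just as short
# and arguably easier to read.
def solution_loop(n: str = N, window: int = 13) -> int:
    best = 0
    for i in range(len(n) - window + 1):
        product = 1
        for digit in n[i : i + window]:
            product *= int(digit)
        best = max(best, product)
    return best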
if __name__ == "__main__":
print(F"{solution() = }")
| 314 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , **lowerCAmelCase_ , ) -> Union[Tuple, ImagePipelineOutput]:
_snake_case = self.unet.config.sample_size
_snake_case = (batch_size, 3, img_size, img_size)
_snake_case = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_snake_case = self.scheduler.schedule[t]
_snake_case = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_snake_case , _snake_case = self.scheduler.add_noise_to_input(lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_snake_case = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_snake_case = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_snake_case = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_snake_case = self.scheduler.step_correct(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , step_output.prev_sample , step_output['derivative'] , )
_snake_case = step_output.prev_sample
_snake_case = (sample / 2 + 0.5).clamp(0 , 1 )
_snake_case = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_snake_case = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
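
# Minimal usage sketch (not from the upstream module), assuming a checkpoint trained
# for the Karras et al. (2022) schedule — the checkpoint name is an assumption; kept
# as comments so nothing runs on import:
#     from diffusers import KarrasVePipeline
#     pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(num_inference_steps=50).images[0]
#     image.save("karras_ve_sample.png")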
| 295 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''gpt_neo'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_layers
_snake_case = num_heads
_snake_case = intermediate_size
_snake_case = window_size
_snake_case = activation_function
_snake_case = resid_dropout
_snake_case = embed_dropout
_snake_case = attention_dropout
_snake_case = classifier_dropout
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = use_cache
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = attention_types
_snake_case = self.expand_attention_types_params(lowerCAmelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ ) -> Any:
_snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
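
    # Worked example (added for clarity): for the default
    # `attention_types=[[["global", "local"], 12]]`, the expansion above repeats
    # ["global", "local"] twelve times, yielding a 24-entry per-layer list that
    # alternates global and local attention and matches `num_layers=24`.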
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of GPTNeo's block-length computation to enable the
    export to ONNX: the largest divisor of `seq_length` below `window_size`."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
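
# Worked example (illustrative sizes, not from the upstream module): `custom_unfold`
# mirrors `torch.Tensor.unfold`, e.g. windows of size 2 with step 2 along dim 1 of a
# (2, 6) tensor:
#     x = torch.arange(12).reshape(2, 6)
#     custom_unfold(x, dimension=1, size=2, step=2).shape       # torch.Size([2, 3, 2])
#     torch.equal(custom_unfold(x, 1, 2, 2), x.unfold(1, 2, 2))  # True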
class UpperCamelCase_ ( _lowerCamelCase ):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
_snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCAmelCase ( self ) -> int:
return self._config.num_heads
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]:
_snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
# We need to order the input in the way they appears in the forward()
_snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
]
_snake_case = common_inputs['attention_mask']
if self.use_past:
_snake_case = ordered_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase ( self ) -> int:
return 13
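
# Shape check (illustrative sizes, added for clarity): for the decoder-only dummy
# inputs built above, with batch=2, num_heads=16, seqlen=7 and hidden_size=2048, the
# past length is seqlen + 2 = 9, so each past key/value tensor has shape
# (2, 16, 9, 2048 // 16) = (2, 16, 9, 128), and the attention mask is widened from
# (2, 7) to (2, 7 + 9) by concatenating ones for the past positions.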
| 295 | 1 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; a subtree of even size
    marks a removable edge, so its root is recorded in ``cuts``."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Kick off the DFS from node 1, the root of the tree."""
    dfs(1)
if __name__ == "__main__":
    n_nodes, n_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
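
    # Note (added for clarity): the edge above a subtree may be removed exactly when
    # that subtree has an even number of nodes, since every component of the resulting
    # forest must be even — hence `ret % 2 == 0` marks a cut point. The root itself is
    # not below any edge, hence `len(cuts) - 1`. For the hard-coded 10-node tree above
    # this prints 2 (the edges into the subtrees rooted at nodes 3 and 6).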
| 32 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
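
# Pattern note (added for clarity): each placeholder class in this file lists its
# required backend and calls `requires_backends`, which raises an ImportError with an
# install hint the moment a Flax-backed object is touched without Flax installed.
# A minimal sketch of the same idiom (the class name is an assumption):
#     class _NeedsFlax(metaclass=DummyObject):
#         _backends = ["flax"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["flax"])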
| 345 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowercase : Union[str, Any] = "pt"
elif is_tf_available():
_lowercase : int = "tf"
else:
_lowercase : Union[str, Any] = "jax"
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = PerceiverTokenizer
_a = False
def snake_case ( self : Optional[Any] )-> Optional[int]:
super().setUp()
lowerCamelCase__ : Optional[Any] =PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case ( self : str )-> Optional[int]:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def snake_case ( self : List[str], **lowerCamelCase : int )-> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : List[str], lowerCamelCase : str=False, lowerCamelCase : Dict=20, lowerCamelCase : str=5 )-> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCamelCase__ : Optional[int] =[]
for i in range(len(lowerCamelCase ) ):
try:
lowerCamelCase__ : List[Any] =tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any =list(filter(lambda lowerCamelCase : re.match(r'''^[ a-zA-Z]+$''', t[1] ), lowerCamelCase ) )
lowerCamelCase__ : Optional[int] =list(filter(lambda lowerCamelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase ), lowerCamelCase ) )
if max_length is not None and len(lowerCamelCase ) > max_length:
lowerCamelCase__ : Tuple =toks[:max_length]
if min_length is not None and len(lowerCamelCase ) < min_length and len(lowerCamelCase ) > 0:
while len(lowerCamelCase ) < min_length:
lowerCamelCase__ : int =toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : str =[t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Dict =tokenizer.decode(lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase )
if " " not in output_txt and len(lowerCamelCase ) > 1:
lowerCamelCase__ : Optional[int] =(
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase )
)
if with_prefix_space:
lowerCamelCase__ : Tuple =''' ''' + output_txt
lowerCamelCase__ : Union[str, Any] =tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase )
return output_txt, output_ids
def snake_case ( self : str )-> Dict:
lowerCamelCase__ : int =self.perceiver_tokenizer
lowerCamelCase__ : List[Any] ='''Unicode €.'''
lowerCamelCase__ : List[Any] =tokenizer(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =[4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''], lowerCamelCase )
# decoding
lowerCamelCase__ : str =tokenizer.decode(lowerCamelCase )
self.assertEqual(lowerCamelCase, '''[CLS]Unicode €.[SEP]''' )
lowerCamelCase__ : int =tokenizer('''e è é ê ë''' )
lowerCamelCase__ : int =[4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''], lowerCamelCase )
# decoding
lowerCamelCase__ : str =tokenizer.decode(lowerCamelCase )
self.assertEqual(lowerCamelCase, '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ), '''[CLS]e è é ê ë[SEP]''' )
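
        # Note (added for clarity): Perceiver tokenizes raw UTF-8 bytes with a 6-id
        # offset for its special tokens ([PAD], [BOS], [EOS], [MASK], [CLS], [SEP]),
        # which is why "U" (0x55 = 85) appears as 91 in the expected ids above and why
        # a single id may not decode to valid UTF-8 on its own.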
def snake_case ( self : str )-> List[Any]:
lowerCamelCase__ : Tuple =self.perceiver_tokenizer
lowerCamelCase__ : str =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCamelCase__ : int =[4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowerCamelCase__ : int =tokenizer(lowerCamelCase, padding=lowerCamelCase, return_tensors=lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
if FRAMEWORK != "jax":
lowerCamelCase__ : Optional[Any] =list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : Dict =list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
self.assertEqual((2, 38), batch.input_ids.shape )
self.assertEqual((2, 38), batch.attention_mask.shape )
def snake_case ( self : Any )-> Any:
lowerCamelCase__ : Tuple =self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase__ : str =tokenizer(lowerCamelCase, padding=lowerCamelCase, return_tensors=lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''', lowerCamelCase )
self.assertIn('''attention_mask''', lowerCamelCase )
self.assertNotIn('''decoder_input_ids''', lowerCamelCase )
self.assertNotIn('''decoder_attention_mask''', lowerCamelCase )
def snake_case ( self : List[Any] )-> List[Any]:
lowerCamelCase__ : List[str] =self.perceiver_tokenizer
lowerCamelCase__ : int =[
'''Summary of the text.''',
'''Another summary.''',
]
lowerCamelCase__ : Any =tokenizer(
text_target=lowerCamelCase, max_length=32, padding='''max_length''', truncation=lowerCamelCase, return_tensors=lowerCamelCase )
self.assertEqual(32, targets['''input_ids'''].shape[1] )
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 355 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 272 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays the progress bar only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every rank except the local main process.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
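

# Illustrative usage (added; `dataloader` is a hypothetical iterable, not accelerate API):
#
#   for batch in tqdm(True, dataloader, desc="training"):
#       ...
#
# With `main_process_only=True`, only local rank 0 renders the bar; every other rank gets a
# disabled `tqdm` instance, so logs are not duplicated across processes.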
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
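
# Illustrative usage (added for clarity; not part of the original init): because the module is
# replaced by a `_LazyModule`, the heavy torch/flax submodules are only imported when an
# attribute is first accessed, e.g.:
#
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderConfig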
| 251 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 365 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names into the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
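

# Illustrative check (added; the shard file name below follows the Megatron-DeepSpeed
# convention such as "layer_05-model_00-model_states.pt", which is an assumption here):
# the first three shards hold embeddings/norms, so block numbering is offset by 3.
assert layer_name_mapping("mlp.dense_4h_to_h.weight", "layer_05-model_00-model_states.pt") == (
    "h.2.mlp.dense_4h_to_h.weight"
)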
def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
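

# Illustrative checks (added): torch.float16 is 16 bits -> 2 bytes, and the script counts
# torch.bool as 1/8 byte when estimating shard sizes.
assert get_dtype_size(torch.float16) == 2
assert get_dtype_size(torch.bool) == 1 / 8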
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 292 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences by appending `<sep>` and `<cls>`:
        `X <sep> <cls>` or `A <sep> B <sep> <cls>`.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Create token type IDs for XLNet: sequence A gets 0s, sequence B gets 1s, and the final
        `<cls>` token gets segment id 2.
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 173 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the
    `TYPE_CHECKING` objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between the `_import_structure` objects and the `TYPE_CHECKING` objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the sum to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
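

# Illustrative check (added): 1/2 + 1/3 + 1/6 == 1, so the reduced sum is (1, 1).
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)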
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 27 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize the target transcription before scoring."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
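

# Illustrative check (added, not part of the original evaluation script): characters in
# the ignore set are stripped and the text is lower-cased.
assert normalize_text("Hello, World!") == "hello world"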
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first few examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()

    main(args)
| 27 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order at the head, so iteration yields ascending order.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
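

# Illustrative check (added): merging keeps the combined values sorted ascending.
assert str(merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([2]))) == "1 -> 2 -> 3"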
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 295 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Return True only if every expected custom kernel/extension file is present."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 295 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        # Print the choice at the given index, with an arrow marking the current position.
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        # Should not be directly called; moves the cursor either up or down.
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        # Start the menu and return the selected index.
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
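

# Illustrative usage (added; interactive, so shown as a comment rather than executed here):
#
#   choice = BulletMenu("Which mixed precision?", ["no", "fp16", "bf16"]).run(default_choice=0)
#
# `run` redraws the list on arrow/number key presses and returns the selected index.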
| 364 |
'''simple docstring'''
import os
def largest_product(grid):
    """Return the largest product of four adjacent numbers in the grid, in any direction."""
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest
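

# Illustrative sanity check (added; a hypothetical 4x4 grid, not the Project Euler input):
# every four-in-a-row product in a grid of all twos is 2**4 == 16.
assert largest_product([[2] * 4 for _ in range(4)]) == 16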
def solution():
    """Read the grid from grid.txt next to this file and return the largest product."""
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 61 | 0 |
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    """Find the modular inverse of `a` modulo `m` via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
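

# Illustrative check (added): 3 * 5 == 15 ≡ 1 (mod 7), so the inverse of 3 modulo 7 is 5.
assert find_mod_inverse(3, 7) == 5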
| 63 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence or word."""
    if not sentence:
        return ""

    # Map each lowercase letter to its uppercase counterpart; `dict.get` with a default
    # leaves non-lowercase first characters (digits, punctuation) untouched.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
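

# Illustrative checks (added): only the first character is mapped; digits pass through.
assert capitalize("hello world") == "Hello world"
assert capitalize("123 hello") == "123 hello"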
if __name__ == "__main__":
from doctest import testmod
testmod()
| 272 | 0 |
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    """Return True only when at least one value is given and all values are positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
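

# Illustrative check (added, assuming the lighter gas is passed first, as in the code above):
# by Graham's law a gas with one quarter of the molar mass effuses exactly twice as fast.
assert effusion_ratio(8.0, 32.0) == 2.0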
| 30 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 30 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 207 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
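

# Minimal usage sketch (illustrative only; how the individual components are
# obtained is an assumption, not part of this file):
#
#   pipe = SpeechToImagePipeline(
#       speech_model=speech_model,          # a WhisperForConditionalGeneration
#       speech_processor=speech_processor,  # a WhisperProcessor
#       vae=vae, text_encoder=text_encoder, tokenizer=tokenizer,
#       unet=unet, scheduler=scheduler,
#       safety_checker=safety_checker, feature_extractor=feature_extractor,
#   ).to("cuda")
#   audio, sr = ...  # e.g. a 16 kHz mono waveform loaded with librosa
#   image = pipe(audio, sampling_rate=sr).images[0]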
| 292 | 0 |
from string import ascii_uppercase


letter_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_letter = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
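

# For example, with the 17-character message "THE GERMAN ATTACK" and the key
# "SECRET", generate_key repeats the key until it matches the message length:
#
#   generate_key("THE GERMAN ATTACK", "SECRET") == "SECRETSECRETSECRE"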
def cipher_text(message: str, key_new: str) -> str:
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (letter_to_index[letter] - letter_to_index[key_new[i]]) % 26
            i += 1
            encrypted += index_to_letter[x]
    return encrypted


def original_text(cipher: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (letter_to_index[letter] + letter_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_letter[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 50 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    return np.maximum(0, vector)
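

# relu is applied elementwise, so thanks to NumPy broadcasting it also works on
# multi-dimensional inputs, e.g.:
#
#   relu(np.array([[-2.0, 3.0], [0.5, -0.1]]))  # -> [[0. , 3. ], [0.5, 0. ]]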
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 50 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 27 |
import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module
def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Serialize classes by name so mappings stay succinct and easy to read."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
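

# Example usage (the test file path is illustrative):
#
#   test_map = get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(test_map))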
| 27 | 1 |
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")

            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 362 |
def count_inversions_bf(arr):
    """Count inversions by brute force, checking every pair: O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
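

# For example, [10, 2, 1] contains three inversions: (10, 2), (10, 1) and (2, 1).
# The brute-force count above and the O(n log n) count below must always agree.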
def count_inversions_recursive(arr):
    """Count inversions with a divide-and-conquer (merge-sort style) approach: O(n log n).

    Returns the sorted array together with the number of inversions.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (p[i], q[j]) with p[i] > q[j]."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
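

# Worked example: _count_cross_inversions([1, 3], [0, 2]) merges the two sorted
# halves into [0, 1, 2, 3] and counts 3 cross inversions: (1, 0), (3, 0), (3, 2).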
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 165 | 0 |
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 24 |
"""simple docstring"""
from collections import namedtuple
_a = namedtuple('from_to', 'from_ to')
_a = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0454, 264.172),
'cubicyard': from_to(0.7_6455, 1.3_0795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.0_0023_6588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
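

# Illustrative conversions (the numbers follow directly from the table above):
#
#   volume_conversion(4, "cubicmeter", "litre")  -> 4 * 1 * 1000 = 4000.0
#   volume_conversion(1, "litre", "gallon")      -> 1 * 0.001 * 264.172 ~= 0.264172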
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 | 0 |
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
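

# Usage sketch (the paths are the same defaults assumed by load_vqgan above;
# `image_tensor` is an assumption: a (B, 3, H, W) float tensor scaled to [-1, 1]):
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device)
#   reconstruction = reconstruct_with_vqgan(image_tensor, vqgan)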
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 177 |
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a_string: bytes) -> str:
    return base64.b85decode(a_string).decode("utf-8")
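

# Encoding and decoding are inverses, so for any UTF-8 string s:
#
#   base85_decode(base85_encode(s)) == s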
if __name__ == "__main__":
_lowerCamelCase = 'Hello World!'
_lowerCamelCase = baseaa_encode(test)
print(encoded)
_lowerCamelCase = baseaa_decode(encoded)
print(decoded)
| 177 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Set a given tensor (parameter or buffer) of a module on a specific device. Note that doing `param.to(device)`
    creates a new tensor not linked to the parameter, which is why this helper is needed.
    """
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """
    Private recursive helper for `replace_with_bnb_linear`; returns the model and whether any linear was replaced.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Replace all `nn.Linear` (and `Conv1D`) layers of the model with their bitsandbytes 8-bit / 4-bit counterparts.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Utility to find the modules to keep in full precision, e.g. the `lm_head` of a CausalLM, which is usually tied
    to the input embeddings and numerically sensitive to quantization.
    """
    # Create a copy of the model and tie the weights, then check which parameters are tied
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
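

if __name__ == "__main__":
    # Editor's sketch (not part of the original module): how the helpers above
    # are typically combined; assumes `transformers` is installed and uses
    # "gpt2" purely as a small demo checkpoint.
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    demo_model = AutoModelForCausalLM.from_pretrained("gpt2")
    keep_in_full_precision = get_keys_to_not_convert(demo_model)  # typically ["lm_head"]
    demo_model = replace_with_bnb_linear(
        demo_model,
        modules_to_not_convert=keep_in_full_precision,
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    )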
| 30 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
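

if __name__ == "__main__":
    # Editor's sketch (not part of the original file): a dict with a
    # "model_type" key is resolved through CONFIG_MAPPING; "convnext" here is
    # an arbitrary demo choice.
    config = UperNetConfig(backbone_config={"model_type": "convnext"})
    print(config.backbone_config.model_type)  # "convnext"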
| 30 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 353 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
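

# Editor's note (sketch, not part of the original file): the @slow integration
# tests above only run when the RUN_SLOW environment variable is set, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/beit/test_modeling_beit.py
# The test-file path is an assumption about the checkout layout.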
| 305 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
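

if __name__ == "__main__":
    # Editor's sketch (not part of the original file): composing the joint
    # config from its sub-configs via the classmethod above; the tiny sizes
    # are arbitrary demo values.
    text = Pix2StructTextConfig(num_layers=2, num_heads=2)
    vision = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
    joint = Pix2StructConfig.from_text_vision_configs(text, vision)
    print(joint.to_dict()["text_config"]["num_layers"])  # 2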
| 50 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
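

# Editor's sketch (not part of the original tests): the end-to-end pattern the
# faiss tests above exercise; assumes `faiss` and `datasets` are installed.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "vecs": [[1.0] * 5, [0.0] * 5]})
#   ds.add_faiss_index(column="vecs")
#   scores, examples = ds.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))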
| 50 | 1 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` at which `pattern` begins."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
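    # Editor's note (sketch): the scan above is O(len(s) * len(pattern)); for a
    # single first occurrence, str.find returns the same index:
    assert "ABAAABCDBBABCDDEBCABC".find("ABC") == naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC")[0]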
| 365 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
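
# Editor's note (sketch): e.g. 12 = 4 + 4 + 4, so the function returns 3 for 12;
# by Lagrange's four-square theorem the answer never exceeds 4 for positive n.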
| 216 | 0 |
"""simple docstring"""
def lowercase ( __snake_case : Optional[Any] , __snake_case : Tuple ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowercase_ : str = str(bin(snake_case__ ) )[2:] # remove the leading "0b"
lowercase_ : Union[str, Any] = str(bin(snake_case__ ) )[2:] # remove the leading "0b"
lowercase_ : str = max(len(snake_case__ ) , len(snake_case__ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(snake_case__ ) , b_binary.zfill(snake_case__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
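
# Editor's note (sketch): for non-negative ints the result agrees with the
# native operator up to zero padding, e.g. int(binary_and(25, 32), 2) == 25 & 32.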
| 33 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 165 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test that get_neighbors_pixel() does not return None
    x_coordinate = 0
    y_coordinate = 0
    center_pixel = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center_pixel
    )
    assert neighbors_pixels is not None

    # Test for local_binary_pattern()
    # Create a numpy array with the same height and width as the read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterate through the image and calculate the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 294 |
import os
def solution():
    """Return the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
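
# Editor's note: per the Project Euler 22 statement, "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53 and, at position 938 in the sorted list,
# contributes 938 * 53 = 49714 to the total.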
if __name__ == "__main__":
print(solution())
| 294 | 1 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
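
# Editor's note (sketch): lru_cache memoizes every intermediate result, so
# repeated calls are O(1) after the first; e.g. factorial(5) == 120.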
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
                den += 1
            num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    """Return the denominator of the product of the digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
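
# Editor's note: the four curious fractions are 16/64, 19/95, 26/65 and 49/98;
# their product reduces to 1/100, so solution() returns 100.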
if __name__ == "__main__":
print(solution())
| 177 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="train" , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="" , ) -> List[str]:
super().__init__()
lowercase__ : Optional[int] = Path(__lowerCAmelCase ).joinpath(type_path + '''.source''' )
lowercase__ : List[Any] = Path(__lowerCAmelCase ).joinpath(type_path + '''.target''' )
lowercase__ : str = self.get_char_lens(self.src_file )
lowercase__ : List[str] = max_source_length
lowercase__ : Optional[Any] = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
lowercase__ : Any = tokenizer
lowercase__ : Union[str, Any] = prefix
if n_obs is not None:
lowercase__ : Any = self.src_lens[:n_obs]
lowercase__ : int = src_lang
lowercase__ : Tuple = tgt_lang
def __len__( self ) -> Optional[int]:
return len(self.src_lens )
def __getitem__( self , __lowerCAmelCase ) -> Dict[str, torch.Tensor]:
lowercase__ : Dict = index + 1 # linecache starts at 1
lowercase__ : int = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip('''\n''' )
lowercase__ : List[str] = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip('''\n''' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __lowerCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase__ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
)
lowercase__ : Dict = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
lowercase__ : Union[str, Any] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , '''right''' )
lowercase__ : Dict = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , '''right''' )
lowercase__ : Optional[int] = source_inputs['''input_ids'''].squeeze()
lowercase__ : int = target_inputs['''input_ids'''].squeeze()
lowercase__ : str = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCAmelCase( __lowerCAmelCase ) -> str:
return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
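A hedged usage sketch (added): the directory layout and checkpoint name below are assumptions — the dataset expects line-aligned `<data_dir>/<type_path>.source` and `.target` text files.
from torch.utils.data import DataLoader
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
train_dataset = Seq2SeqDataset(tokenizer, "path/to/data_dir", max_source_length=64, max_target_length=64)
train_loader = DataLoader(train_dataset, batch_size=8, collate_fn=train_dataset.collate_fn)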
logger = getLogger(__name__)


def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path):
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f, x):
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
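A worked example (added) makes the token-level F1 concrete:
# normalize_answer drops articles, punctuation and case, so:
#   "The cat sat!"   -> ["cat", "sat"]
#   "a cat sat down" -> ["cat", "sat", "down"]
# num_same = 2, precision = 2/2, recall = 2/3, F1 = 2*1*(2/3)/(1 + 2/3) = 0.8
assert round(f1_score("The cat sat!", "a cat sat down"), 4) == 0.8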
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
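A minimal sketch of what set_extra_model_params does (added; the namespaces are stand-ins for real hparams/config objects):
from argparse import Namespace
hparams = Namespace(dropout=0.1)
config = Namespace(dropout_rate=0.0)  # T5-style config: no `dropout`, only `dropout_rate`
hparams, config = set_extra_model_params(["dropout"], hparams, config)
print(config.dropout_rate)  # 0.1 — the value moved from hparams onto the config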
| 214 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
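A quick sanity check of the schedule (added; the step count is arbitrary): betas come out one per timestep and are capped at max_beta.
betas = betas_for_alpha_bar(10)
assert betas.shape == (10,)
assert float(betas.max()) <= 0.999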
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ) -> None:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
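For reference (added note, following DDPM eq. (7)): the quantity computed above is the posterior variance \tilde{\beta}_t = \frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}\,\beta_t. The `fixed_small_log` branch clamps it and returns \exp(0.5 \log \tilde{\beta}_t), i.e. a standard deviation, while `learned_range` interpolates between \log \tilde{\beta}_t and \log \beta_t using the model-predicted fraction.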
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
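For reference (added note): step 2 inverts the forward process for `epsilon` prediction, \hat{x}_0 = \frac{x_t - \sqrt{1 - \bar{\alpha}_t}\,\epsilon_\theta(x_t)}{\sqrt{\bar{\alpha}_t}}, and steps 4–5 assemble the DDPM posterior mean \mu_t = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1 - \bar{\alpha}_t}\,\hat{x}_0 + \frac{\sqrt{\alpha_t}\,(1 - \bar{\alpha}_{t-1})}{1 - \bar{\alpha}_t}\,x_t from eq. (7) of the paper cited in the comments.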
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
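An end-to-end usage sketch (added; the tensor shape, step count, and random model output are arbitrary stand-ins for a real denoising model):
scheduler = UnCLIPScheduler()
scheduler.set_timesteps(25, device="cpu")
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for the model's epsilon prediction
    sample = scheduler.step(model_output, t, sample).prev_sample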
| 214 | 1 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    '''simple docstring'''
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
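Example (added; the function name above is a readability choice made in this copy):
assert sum_of_divisors(12) == 1 + 2 + 3 + 4 + 6  # proper divisors of 12 sum to 16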
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """simple docstring"""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
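A worked example (added): merging [1, 3] and [2] yields [1, 2, 3], so the median is 2; with an even total count the two middle values are averaged.
assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5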
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Any = [float(x) for x in input('Enter the elements of first array: ').split()]
A : Union[str, Any] = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
| 305 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
MAX_MODEL_INPUT_SIZES = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
a__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
a__ = {'''mustc''': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F"""<lang:{lang}>""" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""") for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    """simple docstring"""
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    """simple docstring"""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
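A hedged usage sketch (added; it fetches the checkpoint named in the constants above, so it needs network access):
from transformers import Speech2TextTokenizer
tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids
print(tokenizer.decode(ids, skip_special_tokens=True))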
| 367 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 15 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """simple docstring"""
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
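One concrete rename (added check, derived from the rules above):
assert rename_key("img_encoder.layers.0.blocks.0.norm1.weight") == (
    "vision_model.encoder.stages.0.layers.0.layer_norm1.weight"
)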
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    # NOTE: the destination key names below were reconstructed from rename_key's
    # conventions above (q/k/v projection naming follows the CLIP-style layout).
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """simple docstring"""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
        default="groupvit-gcc-yfcc",
type=str,
help="""Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 194 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
lowercase__ ='ETAOINSHRDLCUMWFGYPBVKJXQZ'
lowercase__ ='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __UpperCamelCase ( lowerCAmelCase__ : str ):
__a : List[Any] = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __UpperCamelCase ( lowerCAmelCase__ : tuple ):
return x[0]
def __UpperCamelCase ( lowerCAmelCase__ : str ):
__a : Optional[Any] = get_letter_count(lowerCAmelCase__ )
__a : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(lowerCAmelCase__ )
__a : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowerCAmelCase__ )
__a : int = ''''''.join(freq_to_letter[freq] )
__a : Any = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lowerCAmelCase__ , reverse=lowerCAmelCase__ )
__a : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowerCAmelCase__ )
def __UpperCamelCase ( lowerCAmelCase__ : str ):
__a : int = get_frequency_order(lowerCAmelCase__ )
__a : str = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
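A small worked example (added): a message of only E's puts E alone in the top frequency bucket, and only E overlaps ETAOIN's six most common letters.
order = get_frequency_order("EEEE")
assert order[0] == "E"
assert english_freq_match_score("EEEE") == 1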
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,):
super().__init__()
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''')
self.register_modules(
speech_model=A__ ,speech_processor=A__ ,vae=A__ ,text_encoder=A__ ,tokenizer=A__ ,unet=A__ ,scheduler=A__ ,feature_extractor=A__ ,)
def A__ ( self ,A__ = "auto"):
if slice_size == "auto":
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A__)
def A__ ( self):
self.enable_attention_slicing(A__)
@torch.no_grad()
def __call__( self ,A__ ,A__=1_6_0_0_0 ,A__ = 5_1_2 ,A__ = 5_1_2 ,A__ = 5_0 ,A__ = 7.5 ,A__ = None ,A__ = 1 ,A__ = 0.0 ,A__ = None ,A__ = None ,A__ = "pil" ,A__ = True ,A__ = None ,A__ = 1 ,**A__ ,):
lowercase = self.speech_processor.feature_extractor(
A__ ,return_tensors='''pt''' ,sampling_rate=A__).input_features.to(self.device)
lowercase = self.speech_model.generate(A__ ,max_length=4_8_0_0_0_0)
lowercase = self.speech_processor.tokenizer.batch_decode(A__ ,skip_special_tokens=A__ ,normalize=A__)[
0
]
if isinstance(A__ ,A__):
lowercase = 1
elif isinstance(A__ ,A__):
lowercase = len(A__)
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(A__)}')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ ,A__) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A__)}.')
# get prompt text embeddings
lowercase = self.tokenizer(
A__ ,padding='''max_length''' ,max_length=self.tokenizer.model_max_length ,return_tensors='''pt''' ,)
lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f' {self.tokenizer.model_max_length} tokens: {removed_text}')
lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase , lowercase , lowercase = text_embeddings.shape
lowercase = text_embeddings.repeat(1 ,A__ ,1)
lowercase = text_embeddings.view(bs_embed * num_images_per_prompt ,A__ ,-1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
if negative_prompt is None:
lowercase = [''''''] * batch_size
elif type(A__) is not type(A__):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(A__)} !='
f' {type(A__)}.')
elif isinstance(A__ ,A__):
lowercase = [negative_prompt]
elif batch_size != len(A__):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(A__)}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''')
else:
lowercase = negative_prompt
lowercase = text_input_ids.shape[-1]
lowercase = self.tokenizer(
A__ ,padding='''max_length''' ,max_length=A__ ,truncation=A__ ,return_tensors='''pt''' ,)
lowercase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase = uncond_embeddings.shape[1]
lowercase = uncond_embeddings.repeat(1 ,A__ ,1)
lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt ,A__ ,-1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase = torch.randn(A__ ,generator=A__ ,device='''cpu''' ,dtype=A__).to(
self.device)
else:
lowercase = torch.randn(A__ ,generator=A__ ,device=self.device ,dtype=A__)
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
lowercase = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(A__)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
lowercase = {}
if accepts_eta:
lowercase = eta
for i, t in enumerate(self.progress_bar(A__)):
# expand the latents if we are doing classifier free guidance
lowercase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowercase = self.scheduler.scale_model_input(A__ ,A__)
# predict the noise residual
lowercase = self.unet(A__ ,A__ ,encoder_hidden_states=A__).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase = noise_pred.chunk(2)
lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase = self.scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ ,A__ ,A__)
lowercase = 1 / 0.18215 * latents
lowercase = self.vae.decode(A__).sample
lowercase = (image / 2 + 0.5).clamp(0 ,1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase = image.cpu().permute(0 ,2 ,3 ,1).float().numpy()
if output_type == "pil":
lowercase = self.numpy_to_pil(A__)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=A__ ,nsfw_content_detected=A__)
| 351 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    '''simple docstring'''
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    '''simple docstring'''
    filename = "a" * 1_0_0_0 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 2_5_5
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
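A minimal standalone sketch of the API under test (added; the lock path is a placeholder):
from datasets.utils.filelock import FileLock
lock = FileLock("/tmp/demo.lock")
with lock:  # equivalent to lock.acquire() / lock.release()
    pass  # only one process at a time runs this critical section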
| 294 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 294 | 1 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
a_ = logging.get_logger(__name__)
def __lowercase ( snake_case_ : int ,snake_case_ : int ) ->Dict:
'''simple docstring'''
try:
with open(snake_case_ ,'''rb''' ) as flax_state_f:
__A : Optional[Any] = from_bytes(snake_case_ ,flax_state_f.read() )
except UnpicklingError as e:
try:
with open(snake_case_ ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(snake_case_ ,snake_case_ )
def __lowercase ( snake_case_ : Any ,snake_case_ : Dict ) ->Any:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
__A : int = flatten_dict(jax.tree_util.tree_map(lambda snake_case_ : x.dtype == jnp.bfloataa ,snake_case_ ) ).values()
if any(snake_case_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
__A : Tuple = jax.tree_util.tree_map(
lambda snake_case_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,snake_case_ )
__A : Union[str, Any] = ''''''
__A : Optional[int] = flatten_dict(snake_case_ ,sep='''.''' )
__A : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
__A : Any = []
__A : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__A : Optional[Any] = flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__A : Any = flax_key_tuple_array[:-1] + ['''weight''']
__A : int = jnp.transpose(snake_case_ ,(3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__A : Optional[int] = flax_key_tuple_array[:-1] + ['''weight''']
__A : Any = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__A : List[Any] = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(snake_case_ ):
__A : int = (
flax_key_tuple_string.replace('''_0''' ,'''.0''' )
.replace('''_1''' ,'''.1''' )
.replace('''_2''' ,'''.2''' )
.replace('''_3''' ,'''.3''' )
.replace('''_4''' ,'''.4''' )
.replace('''_5''' ,'''.5''' )
.replace('''_6''' ,'''.6''' )
.replace('''_7''' ,'''.7''' )
.replace('''_8''' ,'''.8''' )
.replace('''_9''' ,'''.9''' )
)
__A : str = '''.'''.join(snake_case_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
__A : List[Any] = np.asarray(snake_case_ ) if not isinstance(snake_case_ ,np.ndarray ) else flax_tensor
__A : Optional[Any] = torch.from_numpy(snake_case_ )
# remove from missing keys
missing_keys.remove(snake_case_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case_ )
pt_model.load_state_dict(snake_case_ )
# re-transform missing_keys to list
__A : Any = list(snake_case_ )
if len(snake_case_ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(snake_case_ ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
return pt_model
| 291 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
ETAOIN = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
LETTERS = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""


def get_letter_count(message: str) -> dict[str, int]:
    '''simple docstring'''
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    '''simple docstring'''
    return x[0]


def get_frequency_order(message: str) -> str:
    '''simple docstring'''
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    '''simple docstring'''
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ (__snake_case ):
def __init__( self , *a , **a):
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , a , )
super().__init__(*a , **a)
| 214 |
import unittest
from transformers import DonutProcessor
snake_case_ = '''naver-clova-ix/donut-base'''
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
def snake_case_ ( self):
lowercase__ : Dict = DonutProcessor.from_pretrained(a)
def snake_case_ ( self):
lowercase__ : Tuple = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
lowercase__ : Tuple = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
lowercase__ : str = self.processor.tokenajson(a)
self.assertDictEqual(a , a)
| 214 | 1 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans('', '', string.punctuation)).replace('\n', '')
    tokenize_document = document_without_punctuation.split(' ')  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('', '', string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n')
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.')
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError('df must be > 0')
    elif n == 0:
        raise ValueError('log10(0) is undefined.')
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    '''simple docstring'''
    return round(tf * idf, 3)
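A worked example (added) tying the four functions together on a toy corpus:
corpus = "the pet store\nthe book store"
tf = term_frequency("pet", "pet my pet")   # 2
df, n = document_frequency("pet", corpus)  # (1, 2)
print(tf_idf(tf, inverse_document_frequency(df, n)))  # 2 * log10(2/1), rounded -> 0.602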
| 370 |
from __future__ import annotations
class Node:
    '''simple docstring'''
    def __init__(self, data: int):
        self.data = data
        self.left = None
        self.right = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    '''simple docstring'''
    # The exact shape of the original demo tree was lost in this copy; the nine
    # nodes below are wired into a representative binary tree for the checks.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
| 35 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swin'] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_swin'] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    config_parameters_to_change = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
    key_parameters_to_change = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
SCREAMING_SNAKE_CASE :int = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
SCREAMING_SNAKE_CASE :Dict = reader.read()
SCREAMING_SNAKE_CASE :List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNet2DModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config['down_block_types'] = [k.replace('UNetRes', '') for k in config['down_block_types']]
        config['up_block_types'] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('.')[0] == key:
                    new_state_dict['.'.join([new_key] + param_key.split('.')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 15 | 0 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url).content
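# Note: the downloadgram endpoint is assumed to return a JSON list whose first
# entry carries candidate sources under "urls"; that shape is not guaranteed by
# any official API and may change.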
if __name__ == "__main__":
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 223 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset(IterableDataset):
    '''simple docstring'''
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented-out code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="""pt""")
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
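# Each prompt is yielded `n_copies` times so that, across dataloader batches,
# every HumanEval task ends up with `n_samples` generations in total.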
class EndOfFunctionCriteria(StoppingCriteria):
    '''simple docstring'''
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    string_list = re.split("""(%s)""" % """|""".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
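# Example of the behaviour remove_last_block relies on (hypothetical input):
# re.split with a capturing group keeps the delimiters, so
# re.split("(\ndef)", "a\ndef b\ndef c") == ["a", "\ndef", " b", "\ndef", " c"]
# and dropping the last two items removes the final, possibly truncated block.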
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["""stopping_criteria"""][0].start_length = batch["""ids"""].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["""ids"""][:, : batch["""input_len"""]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["""task_id"""].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["""HF_ALLOW_CODE_EVAL"""] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["""TOKENIZERS_PARALLELISM"""] = """false"""
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        """do_sample""": args.do_sample,
        """temperature""": args.temperature,
        """max_new_tokens""": args.max_new_tokens,
        """top_p""": args.top_p,
        """top_k""": args.top_k,
        """stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("""openai_humaneval""")
    code_eval_metric = load_metric("""code_eval""")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["""test"""], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""""""], predictions=[[""""""]])
    except ValueError as exception:
        print(
            """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
            """ flag to enable code evaluation.""")
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["""test"""][task]["""test"""]
            entry_point = F'''check({human_eval["test"][task]["entry_point"]})'''
            references.append("""\n""" + test_func + """\n""" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers)
        print(F'''Results: {pass_at_k}''')
        # Save results to json file
        with open(args.output_file, """w""") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 223 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 139 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
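# NOTE: these credentials must be filled in before running. Also be aware that
# the standard Twitter API only exposes roughly the 3200 most recent tweets of
# a user, so the pagination below stops once that window is exhausted.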
def get_all_tweets(screen_name: str) -> None:
    '''simple docstring'''
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f'''getting tweets before {oldest}''')
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f'''...{len(alltweets)} tweets downloaded so far''')
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f'''new_{screen_name}_tweets.csv''', '''w''') as f:
        writer = csv.writer(f)
        writer.writerow(['''id''', '''created_at''', '''text'''])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 97 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n) -> bool:
    """simple docstring"""
    n = str(n)
    return n == n[::-1]
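# A possible micro-optimisation (not applied here): a binary palindrome cannot
# end in 0, so every double-base palindrome is odd and the search below could
# step through odd numbers only.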
def solution(limit: int = 1_00_00_00) -> int:
    """simple docstring"""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("""b""")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 220 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = f"{src_lang}-{tgt_lang}"
_lowerCAmelCase = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    readme = _lowerCAmelCase  # bind the model card rendered by the f-string above to a readable name
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, """README.md""")
    print(f"Generating {path}")
    with open(path, """w""", encoding="""utf-8""") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 220 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
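    # Minimal sanity check with assumed example values:
    # sigmoid(0) == 0.5 and, since SiLU(x) = x * sigmoid(x), SiLU(0) == 0.
    print(sigmoid(np.array([0.0])))              # -> [0.5]
    print(sigmoid_linear_unit(np.array([0.0])))  # -> [0.]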
| 291 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ):
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
    def test_inputs_embeds(self):
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        torch.manual_seed(2)
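        # the classification head of vit-msn-small is randomly initialised, so
        # the seed above is assumed to make the expected logits reproducible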
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 291 | 1 |
from collections.abc import Iterable
from typing import Generic, TypeVar
snake_case = TypeVar("""_T""")
class QueueByTwoStacks(Generic[_T]):
'''simple docstring'''
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []
    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)
    def __repr__(self) -> str:
        return f'''Queue({tuple(self._stack2[::-1] + self._stack1)})'''
    def put(self, item: _T) -> None:
        self._stack1.append(item)
    def get(self) -> _T:
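        # Cache the bound methods to cut attribute look-ups inside the loop below.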
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
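    # Minimal usage sketch (illustrative values): FIFO order is preserved even
    # though the backing storage is two LIFO stacks, which gives amortised O(1)
    # put/get.
    queue = QueueByTwoStacks([1, 2, 3])
    queue.put(4)
    assert queue.get() == 1  # the first element enqueued is the first one out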
| 353 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
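# Each (old, new) pair above is applied as a plain substring replacement on
# every checkpoint key, so order matters when prefixes overlap.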
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """simple docstring"""
    new_d = OrderedDict()
    # NOTE: the target key name below is reconstructed; it is assumed to be the
    # VisualBERT position-ids buffer.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = "pretraining"
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512}
SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048}
SCREAMING_SNAKE_CASE : Any = "vqa_advanced"
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129}
SCREAMING_SNAKE_CASE : Tuple = "vqa"
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr"
SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase )
# Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase )
model.load_state_dict(lowercase )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 319 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A : Dict = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
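# timm stores query/key/value as one fused `qkv` matrix per block; those
# weights are intentionally absent from the table above and are split out
# separately by read_in_q_k_v below.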
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True, )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    print("Looks ok!")
if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(F"ybelkada/{vit_name}" )
processor.push_to_hub(F"ybelkada/{vit_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 184 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
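# In MAPPING, a "*" in the target name is a placeholder for the encoder layer
# index, which is recovered from the fairseq key at load time.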
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if the encoder has a different dim than the decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        elif name.split(""".""")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, """r""", encoding="""utf-8""") as f:
        lines = f.readlines()
        words = [line.split(""" """)[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
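# fairseq dictionaries are plain text files with one "<token> <count>" entry
# per line; the first four ids are reserved here for the special tokens above.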
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("""embed_out""")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, """vocab.json"""), """w""") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, """vocab.json"""))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = """speech_to_text_2"""
    config["""feature_extractor_type"""] = """wav2vec2"""
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 35 | 0 |
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
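# This structure is assumed to mirror the section schema that
# `datasets.utils.readme.ReadMe` validates dataset cards against.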
CORRECT_DICT = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
__lowerCAmelCase : str ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Dict ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
CORRECT_DICT_FOUR_LEVEL = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
__lowerCAmelCase : List[str] ="""\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_EMPTY_YAML = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
__lowerCAmelCase : Optional[Any] ="""\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_NO_YAML = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
__lowerCAmelCase : List[str] ="""\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Dict ="""The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
__lowerCAmelCase : str ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : str ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
__lowerCAmelCase : Tuple ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
__lowerCAmelCase : Optional[Any] ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
__lowerCAmelCase : Optional[Any] ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
__lowerCAmelCase : Optional[Any] ="""The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
README_MISSING_CONTENT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
EXPECTED_ERROR_README_MISSING_CONTENT = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
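# Usage sketch (illustrative, not part of the test suite): the validator can be driven
# directly on a card string; `example_yaml_structure` is the expected-structure spec
# the tests above rely on.
#
#   readme = ReadMe.from_string(README_MISSING_SUBSECTION, example_yaml_structure)
#   readme.validate()  # raises ValueError listing every structural issue found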
| 32 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """
    Copy/paste/tweak the model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
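    # Example invocation (script name and paths are hypothetical):
    #   python convert_groupvit_checkpoint.py \
    #       --checkpoint_path /path/to/group_vit_checkpoint.pth \
    #       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
    #       --model_name groupvit-gcc-yfcc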
| 32 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
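# Illustration (assumed behavior, not part of the test suite): the processor maps each
# normalized RGB pixel to the index of its nearest colour cluster, and those indices
# become `input_ids`. A toy version with two clusters:
#
#   clusters = np.array([[0.89, 0.66, 0.39], [-0.60, -0.02, 0.54]])
#   pixels = np.array([[0.8, 0.6, 0.5], [-0.5, 0.1, 0.4]])
#   dists = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
#   dists.argmin(axis=1)  # -> array([0, 1])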
| 223 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
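# Illustration (assumed pad_token_id=0): columns containing only padding across the
# whole batch are dropped, e.g.
#   input_ids = torch.tensor([[5, 6, 0, 0, 0], [7, 8, 9, 0, 0]])
#   trim_batch(input_ids, pad_token_id=0)  # -> tensor([[5, 6, 0], [7, 8, 9]])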
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
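# Worked example (illustrative) for the token-level F1 above: with articles and
# punctuation stripped by normalize_answer,
#   f1_score("the cat sat", "a cat sat down")
# compares ["cat", "sat"] with ["cat", "sat", "down"]: overlap 2, precision 2/2,
# recall 2/3, so F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.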
| 223 | 1 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
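# For instance (assumed inputs), a guarded import line such as
#   "    if not is_torch_available():"
# yields "torch", while a line testing several backends yields the sorted,
# "_and_"-joined form, e.g. "tf_and_torch". Non-backend lines return None.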
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the transformers repo and raise an error if at least one object is duplicated.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 360 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
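# Usage sketch (illustrative values): the attribute_map above lets GPT-2-style names
# double as the generic HF ones, e.g.
#   config = GPTJConfig(n_embd=256, n_layer=4, n_head=4, rotary_dim=32)
#   config.hidden_size  # -> 256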
| 105 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
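    # With the lazy structure above, `from transformers.models.roberta import RobertaModel`
    # resolves RobertaModel on first attribute access (assumed _LazyModule behavior), so
    # importing the package stays cheap when optional backends are missing.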
| 220 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
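# Worked example: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34,
# so solution(100) == 44.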
if __name__ == "__main__":
print(F'''{solution() = }''')
| 220 | 1 |
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 350 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 81 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 105 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
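# --- illustration (not part of the original script) ---------------------------------
# A hedged sketch of the ref-file contract `add_chinese_references` expects: one JSON
# list per dataset row, listing the positions of sub-word tokens that continue a whole
# word. The file name, ids and indices below are made up for the example.
def _example_add_chinese_references():
    import json

    from datasets import Dataset

    toy = Dataset.from_dict({"input_ids": [[101, 704, 3152, 102], [101, 928, 2622, 102]]})
    with open("toy_ref.txt", "w", encoding="utf-8") as f:
        f.write("\n".join(json.dumps(ref) for ref in [[2], [2, 3]]))
    with_refs = add_chinese_references(toy, "toy_ref.txt")
    assert with_refs.column_names == ["input_ids", "chinese_ref"]
    return with_refs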
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, we need to stop the Trainer from dropping the extra column.
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
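# --- illustration (not part of the original script) ---------------------------------
# Standalone sketch of the whole-word-masking collator configured above. The checkpoint
# name is only an example, and this assumes the collator accepts plain tokenizer outputs.
def _example_wwm_collator():
    from transformers import AutoTokenizer, DataCollatorForWholeWordMask

    tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")
    collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
    features = [tokenizer("今天天气很好"), tokenizer("我们去公园散步")]
    batch = collator(features)  # pads, masks whole words together, and builds MLM `labels`
    return batch["input_ids"], batch["labels"]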
| 319 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
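# --- illustration (not part of the original module) ---------------------------------
# A minimal sketch of the dispatch pattern used above: each subcommand registers a
# handler through `set_defaults(func=...)`, and the CLI entry point calls `args.func(args)`.
def _example_subcommand_dispatch():
    import argparse

    parser = argparse.ArgumentParser("demo")
    subparsers = parser.add_subparsers()
    test_parser = subparsers.add_parser("test")
    test_parser.set_defaults(func=lambda args: "ran test")
    args = parser.parse_args(["test"])
    return args.func(args)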
| 298 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
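# --- illustration (not part of the original tests) ----------------------------------
# The tester's `seq_length` follows ViT-style patching arithmetic: an image of side H cut
# into PxP patches yields (H // P) ** 2 patch tokens, plus one [CLS] token.
def _example_beit_seq_length(image_size: int = 30, patch_size: int = 2) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # 226 with the defaults used above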
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Any = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ )
UpperCAmelCase__ : Any = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : Any = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : List[Any] = model.to(snake_case__ )
UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**snake_case__ )
UpperCAmelCase__ : Dict = outputs.logits
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=snake_case__ , )
else:
UpperCAmelCase__ : int = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits.detach().cpu()
UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] )
UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
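# --- illustration (not part of the original tests) ----------------------------------
# Hedged sketch of what `post_process_semantic_segmentation` does with `target_sizes`:
# bilinearly resize the (num_labels, h, w) logits and take the per-pixel argmax.
def _example_resize_and_argmax():
    import torch

    logits = torch.randn(1, 150, 160, 160)  # (batch, num_labels, h, w)
    resized = torch.nn.functional.interpolate(logits, size=(500, 300), mode="bilinear", align_corners=False)
    return resized.argmax(dim=1)[0]  # (500, 300) label map, matching the shapes asserted above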
| 298 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
snake_case__ : Optional[Any] = TextToVideoSDPipeline
snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS
snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
snake_case__ : Optional[Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
torch.manual_seed(0 )
a_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
a_ : int = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
a_ : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
a_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a_ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a_ : Dict = self.get_dummy_components()
a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
a_ : Dict = 'np'
a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames
a_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
a_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
a_ : Optional[Any] = pipe.to('cuda' )
a_ : Any = 'Spiderman is surfing'
a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames
a_ : str = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
a_ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
a_ : Tuple = pipe.to('cuda' )
a_ : Any = 'Spiderman is surfing'
a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames
a_ : List[str] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
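# --- illustration (not part of the original tests) ----------------------------------
# The reproducibility trick used throughout these tests: a CPU `torch.Generator` seeded
# with a fixed value makes every sampled noise tensor, and hence the pipeline output,
# deterministic across runs.
def _example_seeded_generator():
    import torch

    first = torch.randn(2, 2, generator=torch.Generator(device="cpu").manual_seed(0))
    second = torch.randn(2, 2, generator=torch.Generator(device="cpu").manual_seed(0))
    assert torch.equal(first, second)
    return first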
| 32 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal timestep embeddings (Tensor2Tensor-style positional encoding)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
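# --- illustration (not part of the original module) ---------------------------------
# A NumPy restatement of the same math, simplified to freq_shift=0 and no flip/scale,
# useful as a quick sanity check of the jax implementation above.
def _example_sinusoidal_numpy(timesteps, embedding_dim: int = 32):
    import numpy as np

    half_dim = embedding_dim // 2
    freqs = np.exp(np.arange(half_dim) * -(np.log(10_000.0) / half_dim))
    angles = np.asarray(timesteps, dtype=np.float64)[:, None] * freqs[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)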
class FlaxTimestepEmbedding(nn.Module):
    """Maps a sinusoidal timestep embedding through a two-layer MLP with SiLU in between."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps `get_sinusoidal_embeddings` as a flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| 32 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for DeepFloyd IF pipelines: the generated images plus per-image flags
    from the safety checker and the watermark detector.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
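# --- illustration (not part of the original module) ---------------------------------
# The guard above follows a common soft-dependency pattern: probe the optional packages
# once, and fall back to dummy placeholder objects so the package import never hard-fails.
def _example_soft_dependency_probe() -> bool:
    try:
        import torch  # noqa: F401

        return True
    except ImportError:
        return False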
| 282 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
"""simple docstring"""
def __init__( self : Any,lowercase_ : Optional[int],lowercase_ : Optional[int]=1_3,lowercase_ : int=7,lowercase_ : List[str]=True,lowercase_ : str=True,lowercase_ : List[str]=True,lowercase_ : Optional[Any]=True,lowercase_ : Dict=9_9,lowercase_ : Dict=2_4,lowercase_ : Union[str, Any]=2,lowercase_ : str=6,lowercase_ : Dict=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : Any=0.1,lowercase_ : Any=0.1,lowercase_ : Any=5_1_2,lowercase_ : Dict=1_6,lowercase_ : List[str]=2,lowercase_ : Dict=0.02,lowercase_ : Any=3,lowercase_ : Dict=None,lowercase_ : List[str]=1_0_0_0,)-> Optional[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = scope
A__ = range_bbox
def snake_case__ ( self : List[Any] )-> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = ids_tensor([self.batch_size, self.seq_length, 4],self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A__ = bbox[i, j, 3]
A__ = bbox[i, j, 1]
A__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A__ = bbox[i, j, 2]
A__ = bbox[i, j, 0]
A__ = t
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self : Dict )-> int:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,)
def snake_case__ ( self : Optional[Any],lowercase_ : Tuple,lowercase_ : str,lowercase_ : Optional[int],lowercase_ : Optional[Any],lowercase_ : str,lowercase_ : List[str],lowercase_ : Tuple,)-> Optional[Any]:
'''simple docstring'''
A__ = LiltModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,bbox=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,bbox=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.hidden_size) )
def snake_case__ ( self : Any,lowercase_ : Dict,lowercase_ : List[Any],lowercase_ : List[str],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : List[Any],)-> List[str]:
'''simple docstring'''
A__ = self.num_labels
A__ = LiltForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : int,lowercase_ : Union[str, Any],lowercase_ : int,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : Optional[int],lowercase_ : Tuple,lowercase_ : List[str],)-> Any:
'''simple docstring'''
A__ = LiltForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_,bbox=lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,start_positions=lowercase_,end_positions=lowercase_,)
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : Optional[int] )-> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : List[str],lowercase_ : str,lowercase_ : Optional[Any],lowercase_ : Optional[Any] )-> Any:
'''simple docstring'''
return True
def snake_case__ ( self : int )-> Tuple:
'''simple docstring'''
A__ = LiltModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=3_7 )
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Dict )-> Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
def snake_case__ ( self : List[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@slow
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = LiltModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
@slow
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any] )-> Dict:
'''simple docstring'''
A__ = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase_ )
A__ = torch.tensor([[1, 2]],device=lowercase_ )
A__ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]],device=lowercase_ )
# forward pass
with torch.no_grad():
A__ = model(input_ids=lowercase_,bbox=lowercase_ )
A__ = torch.Size([1, 2, 7_6_8] )
A__ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]],device=lowercase_,)
self.assertTrue(outputs.last_hidden_state.shape,lowercase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3],lowercase_,atol=1E-3 ) )
| 282 | 1 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: gcd(a, b) via repeated remainders."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the same algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
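# --- illustration (not part of the original module) ---------------------------------
# The sequence of (a, b) states Euclid's algorithm walks through, e.g.
# gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0).
def _example_gcd_trace(a: int = 48, b: int = 18) -> list:
    states = [(a, b)]
    while b:
        a, b = b, a % b
        states.append((a, b))
    return states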
| 73 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given number of digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    multinomial_term = 1
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
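# --- illustration (not part of the original module) ---------------------------------
# Each Chudnovsky term contributes roughly 14 correct digits, which is why the loop runs
# ceil(precision / 14) times. A quick cross-check against math.pi:
def _example_check_pi() -> str:
    from math import pi as float_pi

    digits = pi(10)
    assert digits[:8] == str(float_pi)[:8]  # both start "3.141592"
    return digits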
| 105 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
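# --- illustration (not part of the original module) ---------------------------------
# Hedged sketch of what the `_LazyModule` replacement buys: attributes resolve to their
# submodule only on first access, keeping the package import itself cheap.
def _example_lazy_attribute(module_name: str = "json", attr: str = "dumps"):
    import importlib

    # Nothing from `module_name` is imported until this function is actually called.
    return getattr(importlib.import_module(module_name), attr)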
| 323 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323 | 1 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
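# --- illustration (not part of the original module) ---------------------------------
# For comparison, the textbook Fisher-Yates shuffle: swap each position with a uniformly
# chosen index at or below it, which makes every permutation exactly equally likely.
def fisher_yates_shuffle_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data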
| 87 |
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
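# --- illustration (not part of the original module) ---------------------------------
# Worked example: 10! = 3628800, so solution(10) returns 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
def _example_digit_sum() -> int:
    return sum(int(digit) for digit in str(factorial(10)))  # 27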
| 81 | 0 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then scan with two pointers for each fixed first element."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 353 |
'''simple docstring'''
from __future__ import annotations
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
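
# A short usage sketch, assuming the shear_stress signature above: pass 0 for
# the unknown quantity and the function returns its name and value. A 100 N
# tangential force over a 20 m^2 area yields a shear stress of 5 Pa.
if __name__ == "__main__":
    assert shear_stress(stress=0, tangential_force=100, area=20) == ("stress", 5.0)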
| 170 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")

        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")

        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 298 |
'''simple docstring'''
def capitalized_variants(txt: str) -> list[str]:
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 298 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the inverse of a 2x2 or 3x3 matrix."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
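
# A quick numerical check of the 2x2 branch, assuming the inverse_of_matrix name
# above: the inverse of [[2, 5], [2, 0]] (determinant -10) is
# [[0.0, 0.5], [0.2, -0.2]].
if __name__ == "__main__":
    assert inverse_of_matrix([[2, 5], [2, 0]]) == [[0.0, 0.5], [0.2, -0.2]]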
| 353 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 48 | 0 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for method __add__()"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for method __sub__()"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for method __mul__()"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self):
        """test for Matrix method __mul__()"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self):
        """test for Matrix method __add__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self):
        """test for Matrix method __sub__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 282 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with
        # `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`,
        # `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 282 | 1 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
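
# A non-interactive sketch of the example above, assuming the floyd_warshall
# signature from this snippet: edge 1 -> 2 with weight 2 and edge 2 -> 1 with
# weight 1 reproduce the expected distance matrix without prompting for input.
if __name__ == "__main__":
    INF = float("inf")
    demo_graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
    demo_dist, _ = floyd_warshall(demo_graph, 3)
    assert demo_dist[1][2] == 2.0 and demo_dist[2][1] == 1.0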
| 232 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 232 | 1 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at position to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at position to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at position."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at position is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit value (0 or 1) at position."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
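
# A compact walkthrough of the helpers above, assuming the function names from
# this snippet: for 0b1010 (decimal 10), setting bit 0 gives 0b1011, clearing
# bit 3 gives 0b0010, flipping bit 1 gives 0b1000, bit 1 reads as set and
# bit 0 as clear.
if __name__ == "__main__":
    number = 0b1010
    assert set_bit(number, 0) == 0b1011
    assert clear_bit(number, 3) == 0b0010
    assert flip_bit(number, 1) == 0b1000
    assert is_bit_set(number, 1) is True
    assert get_bit(number, 0) == 0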
| 323 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 323 | 1 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 212 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 212 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the first digits against known issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate length, prefix and Luhn checksum, printing the verdict."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 116 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 170 | 0 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
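
# A short usage sketch, assuming the z_function and find_pattern names above:
# in "abracadabra" the suffix starting at index 7 matches the 4-character
# prefix "abra", and "aba" occurs twice in "ababa" (overlaps are counted).
if __name__ == "__main__":
    assert z_function("abracadabra")[7] == 4
    assert find_pattern("aba", "ababa") == 2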
| 202 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202 | 1 |